path | concatenated_notebook
---|---
Colab.ipynb | ###Markdown
Required dependencies
###Code
!pip install --upgrade pip
!pip install --upgrade tensorflow
!pip install numpy --upgrade
!pip install -q kaggle
!pip install zipfile36
!mkdir checkpoints
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
!kaggle datasets download -d moltean/fruits
import zipfile
with zipfile.ZipFile('fruits.zip', 'r') as zipObj:
    zipObj.extractall()
###Output
_____no_output_____
###Markdown
**Extraction of types from "fruits-360" dataset**
###Code
!mkdir fruits-360-type
!mkdir fruits-360-type/Test
!mkdir fruits-360-type/Training
import shutil
import os
source_dir_test = "fruits-360_dataset/fruits-360/Test/"
source_dir_training = "fruits-360_dataset/fruits-360/Training/"
test_dir="fruits-360-type/Test/"
train_dir="fruits-360-type/Training/"
def extract_types(directory, destination):
    type_counter = {'apple': 0, 'banana': 0, 'cherry': 0, 'grape': 0, 'peach': 0, 'pear': 0, 'pepper': 0, 'plum': 0, 'potato': 0, 'tomato': 0}
    for label_type in ['apple', 'banana', 'cherry', 'grape', 'peach', 'pear', 'pepper', 'plum', 'potato', 'tomato']:
        for label_variety in os.listdir(directory):
            # copy every image of a variety whose folder name starts with the type,
            # excluding grapefruit, which would otherwise match "grape"
            if label_variety.lower().find(label_type) == 0 and label_variety.lower().find("grapefruit") != 0:
                image_list = os.listdir(os.path.join(directory, label_variety))
                for image in image_list:
                    shutil.copy(os.path.join(directory, label_variety, image), os.path.join(destination, label_type, f"{label_type}_{type_counter[label_type]}.jpg"))
                    type_counter[label_type] += 1
            else:
                continue
for label in ['apple', 'banana', 'cherry', 'grape', 'peach', 'pear', 'pepper', 'plum', 'potato', 'tomato']:
    os.system(f"mkdir {os.path.join(test_dir, label)}")
    os.system(f"mkdir {os.path.join(train_dir, label)}")
extract_types(source_dir_test, test_dir)
extract_types(source_dir_training, train_dir)
###Output
_____no_output_____
###Markdown
Modules
###Code
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, Dropout
###Output
_____no_output_____
###Markdown
**Classification based on fruit and vegetable types**
###Code
train_data=[]
train_label=[]
test_data=[]
test_label=[]
epochs=10
batch_size=16
optimizer="adam"
img_size = (32, 32)
shape = (32, 32, 3)
test_dir = 'fruits-360-type/Test'
train_dir = 'fruits-360-type/Training'
metrics=['accuracy']
labels = os.listdir(train_dir)
num_classes = len(labels)
def calculate_class_weights(train_label):
    train_size = len(train_label)
    weights = {}
    for i in range(10):
        weights[i] = 0
    for i in train_label:
        weights[i] = weights[i] + 1
    for i in range(num_classes):
        weights[i] = train_size / (weights[i] * 2.)
    return weights
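# note: this hand-rolled scheme resembles scikit-learn's 'balanced' weighting,
# which divides by the number of classes instead of by 2:
#   from sklearn.utils.class_weight import compute_class_weight
#   compute_class_weight(class_weight='balanced', classes=np.arange(num_classes), y=train_label)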
def load_data(dir):
    i = 0
    data = []
    data_label = []
    for label in labels:
        images = os.listdir(os.path.join(dir, label))
        for image in images:
            image_path = os.path.join(dir, label, image)
            image = load_img(image_path, color_mode='rgb', target_size=img_size)
            array_image = img_to_array(image)
            array_image = array_image / 255.0  # scale pixel values to [0, 1]
            data.append(array_image)
            data_label.append(i)
        i = i + 1
    # pairing and unpairing keeps images aligned with labels (a shuffle step
    # would normally go between these two lines)
    tmp = list(zip(data, data_label))
    data, data_label = zip(*tmp)
    return data, data_label
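# (newer TensorFlow versions also provide tf.keras.preprocessing.image_dataset_from_directory,
# which builds a batched dataset directly from this same directory layout)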
print("Loading training images...")
train_data, train_label=load_data(train_dir)
train_data=np.array(train_data)
train_label=np.array(train_label)
print("Loading test images...")
test_data, test_label=load_data(test_dir)
test_data=np.array(test_data)
test_label=np.array(test_label)
network=tf.keras.models.Sequential()
network.add(Input(shape=shape))
network.add(Conv2D(32, (3, 3), padding="same", activation="relu", strides=(1, 1)))
network.add(MaxPooling2D((2, 2), strides=(2, 2)))
network.add(Conv2D(64, (3, 3), padding="same", activation="relu", strides=(1, 1)))
network.add(MaxPooling2D((2, 2), strides=(2, 2)))
network.add(Conv2D(128, (3, 3), padding="same", activation="relu", strides=(1, 1)))
network.add(MaxPooling2D((2, 2), strides=(2, 2)))
network.add(Flatten())
network.add(Dense(512, activation="softmax"))
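# note: this output layer has 512 units even though there are only 10 classes;
# sparse categorical cross-entropy still trains because the labels (0-9) index
# valid logits, but Dense(num_classes, activation="softmax") would be the
# conventional head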
network.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=metrics)
print(network.summary())
weights=calculate_class_weights(train_label)
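# note: unlike the varieties model below, these weights are not passed to fit()
# (add class_weight=weights to use them)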
history = network.fit(x=train_data,y=train_label,
batch_size=batch_size,
epochs=epochs,
verbose=1)
train_loss, train_accuracy = network.evaluate(x=train_data,y=train_label,steps=(len(train_data)//batch_size)+1, verbose=1)
test_loss, test_accuracy = network.evaluate(x=test_data, y=test_label,steps=(len(test_data)//batch_size)+1, verbose=1)
print(f"Train accuracy = {train_accuracy}")
print(f"Test accuracy = {test_accuracy}")
###Output
Loading training images...
Loading test images...
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 32) 896
max_pooling2d (MaxPooling2D (None, 16, 16, 32) 0
)
conv2d_1 (Conv2D) (None, 16, 16, 64) 18496
max_pooling2d_1 (MaxPooling (None, 8, 8, 64) 0
2D)
conv2d_2 (Conv2D) (None, 8, 8, 128) 73856
max_pooling2d_2 (MaxPooling (None, 4, 4, 128) 0
2D)
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
=================================================================
Total params: 1,142,336
Trainable params: 1,142,336
Non-trainable params: 0
_________________________________________________________________
None
Epoch 1/10
2038/2038 [==============================] - 25s 7ms/step - loss: 0.2785 - accuracy: 0.9110
Epoch 2/10
2038/2038 [==============================] - 14s 7ms/step - loss: 0.0189 - accuracy: 0.9944
Epoch 3/10
2038/2038 [==============================] - 15s 7ms/step - loss: 0.0111 - accuracy: 0.9967
Epoch 4/10
2038/2038 [==============================] - 14s 7ms/step - loss: 0.0184 - accuracy: 0.9949
Epoch 5/10
2038/2038 [==============================] - 14s 7ms/step - loss: 0.0067 - accuracy: 0.9982
Epoch 6/10
2038/2038 [==============================] - 14s 7ms/step - loss: 3.8275e-05 - accuracy: 1.0000
Epoch 7/10
2038/2038 [==============================] - 14s 7ms/step - loss: 0.0164 - accuracy: 0.9957
Epoch 8/10
2038/2038 [==============================] - 14s 7ms/step - loss: 0.0031 - accuracy: 0.9993
Epoch 9/10
2038/2038 [==============================] - 14s 7ms/step - loss: 3.2110e-05 - accuracy: 1.0000
Epoch 10/10
2038/2038 [==============================] - 15s 7ms/step - loss: 7.9446e-06 - accuracy: 1.0000
2038/2038 [==============================] - 9s 4ms/step - loss: 4.2164e-06 - accuracy: 1.0000
682/682 [==============================] - 3s 4ms/step - loss: 0.0362 - accuracy: 0.9881
Train accuracy = 1.0
Test accuracy = 0.9880799651145935
###Markdown
**Classification based on fruit and vegetable varieties**
###Code
train_data=[]
train_label=[]
test_data=[]
test_label=[]
epochs=20
batch_size=16
optimizer="adam"
img_size = (32, 32)
shape = (32, 32, 3)
test_dir = 'fruits-360_dataset/fruits-360/Test'
train_dir = 'fruits-360_dataset/fruits-360/Training'
metrics=['accuracy']
labels = os.listdir(train_dir)
print(labels)
num_classes = len(labels)
def calculate_class_weights(train_label, num_classes):
    train_size = len(train_label)
    weights = {}
    for i in range(num_classes):
        weights[i] = 0
    for i in train_label:
        weights[i] = weights[i] + 1
    for i in range(num_classes):
        weights[i] = train_size / (weights[i] * 2.)
    return weights
def load_data(dir, labels):
    i = 0
    data = []
    data_label = []
    for label in labels:
        images = os.listdir(os.path.join(dir, label))
        for image in images:
            image_path = os.path.join(dir, label, image)
            image = load_img(image_path, color_mode='rgb', target_size=img_size)
            array_image = img_to_array(image)
            array_image = array_image / 255.0
            data.append(array_image)
            data_label.append(i)
        i = i + 1
    tmp = list(zip(data, data_label))
    data, data_label = zip(*tmp)
    return data, data_label
print("Loading training images...")
train_data, train_label=load_data(train_dir, labels)
train_data=np.array(train_data)
train_label=np.array(train_label)
print("Loading test images...")
test_data, test_label=load_data(test_dir, labels)
test_data=np.array(test_data)
test_label=np.array(test_label)
network=tf.keras.models.Sequential()
network.add(Input(shape=shape))
network.add(Conv2D(32, (3, 3), padding="same", activation="relu", strides=(1, 1)))
network.add(MaxPooling2D((2, 2), strides=(2, 2)))
network.add(Conv2D(64, (3, 3), padding="same", activation="relu", strides=(1, 1)))
network.add(MaxPooling2D((2, 2), strides=(2, 2)))
network.add(Flatten())
network.add(Dense(512, activation="relu"))
network.add(Dropout(0.5))
network.add(Dense(num_classes, activation="softmax"))
network.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=metrics)
print(network.summary())
weights=calculate_class_weights(train_label, num_classes)
history = network.fit(x=train_data,y=train_label,
batch_size=batch_size,
epochs=epochs,
class_weight=weights,
verbose=1)
train_loss, train_accuracy = network.evaluate(x=train_data,y=train_label, verbose=1)
test_loss, test_accuracy = network.evaluate(x=test_data, y=test_label, verbose=1)
predicted=network.predict(test_data, batch_size=16, verbose=1)
predicted = predicted.argmax(axis=-1)
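# a quick per-class summary could be produced from these predictions, e.g.:
#   from sklearn.metrics import classification_report
#   print(classification_report(test_label, predicted, target_names=labels))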
print(f"Train accuracy = {train_accuracy}")
print(f"Test accuracy = {test_accuracy}")
###Output
['Kiwi', 'Melon Piel de Sapo', 'Eggplant', 'Pear Williams', 'Chestnut', 'Cherry Wax Black', 'Huckleberry', 'Pear Abate', 'Clementine', 'Pear Red', 'Lychee', 'Cocos', 'Mango', 'Granadilla', 'Avocado', 'Corn Husk', 'Peach Flat', 'Banana Lady Finger', 'Passion Fruit', 'Pear 2', 'Potato Red Washed', 'Nectarine', 'Orange', 'Cherry Wax Yellow', 'Cauliflower', 'Maracuja', 'Mandarine', 'Nut Forest', 'Mango Red', 'Apple Golden 3', 'Apple Braeburn', 'Apple Red Yellow 1', 'Tomato 1', 'Watermelon', 'Pitahaya Red', 'Kaki', 'Cherry 2', 'Strawberry Wedge', 'Guava', 'Tomato Heart', 'Corn', 'Nut Pecan', 'Peach 2', 'Limes', 'Banana Red', 'Tomato Yellow', 'Grape White 4', 'Grape Blue', 'Pineapple Mini', 'Pear Stone', 'Grape White 3', 'Tomato not Ripened', 'Tangelo', 'Potato Red', 'Ginger Root', 'Kohlrabi', 'Onion Red Peeled', 'Pepper Yellow', 'Plum 2', 'Pepper Red', 'Walnut', 'Apricot', 'Onion Red', 'Pear', 'Raspberry', 'Cucumber Ripe', 'Cactus fruit', 'Lemon Meyer', 'Tomato 4', 'Pepino', 'Cucumber Ripe 2', 'Cherry 1', 'Grape White 2', 'Onion White', 'Apple Golden 1', 'Blueberry', 'Potato Sweet', 'Redcurrant', 'Peach', 'Cantaloupe 1', 'Banana', 'Apple Red Yellow 2', 'Beetroot', 'Strawberry', 'Plum 3', 'Papaya', 'Dates', 'Apple Red 2', 'Grape White', 'Physalis', 'Apple Granny Smith', 'Avocado ripe', 'Tomato 3', 'Rambutan', 'Pear Monster', 'Carambula', 'Salak', 'Apple Crimson Snow', 'Apple Pink Lady', 'Pepper Green', 'Plum', 'Tomato 2', 'Apple Red Delicious', 'Kumquats', 'Cherry Rainier', 'Nectarine Flat', 'Grape Pink', 'Cherry Wax Red', 'Pear Kaiser', 'Fig', 'Grapefruit White', 'Pomelo Sweetie', 'Tomato Maroon', 'Hazelnut', 'Apple Golden 2', 'Apple Red 1', 'Pepper Orange', 'Grapefruit Pink', 'Physalis with Husk', 'Potato White', 'Tamarillo', 'Mangostan', 'Pineapple', 'Apple Red 3', 'Pear Forelle', 'Tomato Cherry Red', 'Pomegranate', 'Mulberry', 'Quince', 'Lemon', 'Cantaloupe 2']
Loading training images...
Loading test images...
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_5 (Conv2D) (None, 32, 32, 32) 896
max_pooling2d_5 (MaxPooling (None, 16, 16, 32) 0
2D)
conv2d_6 (Conv2D) (None, 16, 16, 64) 18496
max_pooling2d_6 (MaxPooling (None, 8, 8, 64) 0
2D)
flatten_2 (Flatten) (None, 4096) 0
dense_3 (Dense) (None, 512) 2097664
dropout_1 (Dropout) (None, 512) 0
dense_4 (Dense) (None, 131) 67203
=================================================================
Total params: 2,184,259
Trainable params: 2,184,259
Non-trainable params: 0
_________________________________________________________________
None
Epoch 1/20
4231/4231 [==============================] - 30s 7ms/step - loss: 38.5723 - accuracy: 0.8377
Epoch 2/20
4231/4231 [==============================] - 30s 7ms/step - loss: 6.1971 - accuracy: 0.9701
Epoch 3/20
4231/4231 [==============================] - 31s 7ms/step - loss: 4.5828 - accuracy: 0.9794
Epoch 4/20
4231/4231 [==============================] - 30s 7ms/step - loss: 3.6678 - accuracy: 0.9831
Epoch 5/20
4231/4231 [==============================] - 30s 7ms/step - loss: 3.1450 - accuracy: 0.9870
Epoch 6/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.8773 - accuracy: 0.9881
Epoch 7/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.6481 - accuracy: 0.9889
Epoch 8/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.3012 - accuracy: 0.9912
Epoch 9/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.2768 - accuracy: 0.9916
Epoch 10/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.2120 - accuracy: 0.9926
Epoch 11/20
4231/4231 [==============================] - 30s 7ms/step - loss: 1.9573 - accuracy: 0.9927
Epoch 12/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.1271 - accuracy: 0.9935
Epoch 13/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.2086 - accuracy: 0.9931
Epoch 14/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.1034 - accuracy: 0.9931
Epoch 15/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.0698 - accuracy: 0.9940
Epoch 16/20
4231/4231 [==============================] - 30s 7ms/step - loss: 1.9629 - accuracy: 0.9945
Epoch 17/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.0355 - accuracy: 0.9944
Epoch 18/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.1734 - accuracy: 0.9947
Epoch 19/20
4231/4231 [==============================] - 30s 7ms/step - loss: 1.9340 - accuracy: 0.9951
Epoch 20/20
4231/4231 [==============================] - 30s 7ms/step - loss: 2.3614 - accuracy: 0.9949
2116/2116 [==============================] - 11s 5ms/step - loss: 1.3447e-04 - accuracy: 0.9999
709/709 [==============================] - 4s 5ms/step - loss: 0.3049 - accuracy: 0.9759
1418/1418 [==============================] - 3s 2ms/step
Train accuracy = 0.9999409317970276
Test accuracy = 0.9759343862533569
###Markdown
Welcome to DESI High @ Google Colab!  An exciting adventure awaits you as you will soon have at your fingertips hot-off-the-telescope DESI data to run your own experiments. For you to create and save your own experiments, we'll run everything on Google Colab. This will leave you with a copy of any additions on your Google Drive. We'll need the starter script, downloaded from the DESI High repo:
###Code
! wget --no-clobber -O colab.py https://raw.githubusercontent.com/michaelJwilson/desihigh/main/colab.py
###Output
File ‘colab.py’ already there; not retrieving.
###Markdown
So, without further ado, the magic incantation:
###Code
import colab
###Output
_____no_output_____
###Markdown
Welcome to DESI High @ Google Colab! Testing merge with upstream. 
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
An exciting adventure awaits you as you will soon have at your fingertips hot-off-the-telescope DESI data to run your own experiments. For you to create and save your own experiments, we'll run everything on Google Colab. This will leave you with a copy of any additions on your Google Drive. First, we'll need the starter script:
###Code
! wget -O setup_colab.py https://raw.githubusercontent.com/michaelJwilson/desihigh/main/setup_colab.py
###Output
--2022-02-11 22:23:24-- https://raw.githubusercontent.com/michaelJwilson/desihigh/main/setup_colab.py
Resolving raw.githubusercontent.com... 2606:50c0:8000::154, 2606:50c0:8001::154, 2606:50c0:8002::154, ...
Connecting to raw.githubusercontent.com|2606:50c0:8000::154|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1123 (1.1K) [text/plain]
Saving to: 'setup_colab.py'
setup_colab.py 100%[===================>] 1.10K --.-KB/s in 0s
2022-02-11 22:23:24 (51.0 MB/s) - 'setup_colab.py' saved [1123/1123]
###Markdown
So, without further ado, the magic incantation:
###Code
import setup_colab
###Output
_____no_output_____
###Markdown
###Code
from PIL import Image
###Output
_____no_output_____
###Markdown
Welcome to DESI High @ Google Colab!
###Code
Image.open("desihigh/images/colab.webp")
###Output
_____no_output_____
###Markdown
An exciting adventure awaits you as you will soon have at your fingertips hot-off-the-telescope DESI data to run your own experiments. For you to create and save your own experiments, we'll run everything on Google Colab. This will leave you with a copy of any additions on your Google Drive. We'll need the starter script, downloading from the DESI High repo:
###Code
! wget -O colab.py https://raw.githubusercontent.com/michaelJwilson/desihigh/main/colab.py
! mv /content/drive/MyDrive/desihigh /content/drive/MyDrive/_desihigh
###Output
_____no_output_____
###Markdown
So, without further ado, the magic incantation:
###Code
import colab
###Output
Mounted at /content/drive/
###Markdown
Welcome to DESI High @ Google Colab!  An exciting adventure awaits you as you will soon have at your fingertips hot-off-the-telescope DESI data to run your own experiments. For you to create and save your own experiments, we'll run everything on Google Colab. This will leave you with a copy of any additions on your Google Drive. We'll need the starter script, downloaded from the DESI High repo:
###Code
! wget --no-clobber -O colab.py https://raw.githubusercontent.com/michaelJwilson/desihigh/main/colab.py
###Output
File 'colab.py' already there; not retrieving.
###Markdown
So, without further ado, the magic incantation:
###Code
import colab
###Output
_____no_output_____
###Markdown
Welcome to DESI High @ Google Colab!  An exciting adventure awaits you as you will soon have at your fingertips hot-off-the-telescope DESI data to run your own experiments. For you to create and save your own experiments, we'll run everything on Google Colab. This will leave you with a copy of any additions on your Google Drive. We'll need the starter script, downloaded from the DESI High repo:
###Code
! wget --no-clobber -O colab.py https://raw.githubusercontent.com/michaelJwilson/desihigh/main/colab.py
###Output
File ‘colab.py’ already there; not retrieving.
###Markdown
So, without further ado, the magic incantation:
###Code
import colab
###Output
Mounted at /content/drive/
###Markdown
Welcome to DESI High @ Google Colab! 
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
An exciting adventure awaits you as you will soon have at your fingertips hot-off-the-telescope DESI data to run your own experiments. For you to create and save your own experiments, we'll run everything on Google Colab. This will leave you with a copy of any additions on your Google Drive. First, we'll need the starter script:
###Code
! wget -O setup_colab.py https://raw.githubusercontent.com/michaelJwilson/desihigh/main/setup_colab.py
###Output
--2022-02-11 22:23:24-- https://raw.githubusercontent.com/michaelJwilson/desihigh/main/setup_colab.py
Resolving raw.githubusercontent.com... 2606:50c0:8000::154, 2606:50c0:8001::154, 2606:50c0:8002::154, ...
Connecting to raw.githubusercontent.com|2606:50c0:8000::154|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1123 (1.1K) [text/plain]
Saving to: 'setup_colab.py'
setup_colab.py 100%[===================>] 1.10K --.-KB/s in 0s
2022-02-11 22:23:24 (51.0 MB/s) - 'setup_colab.py' saved [1123/1123]
###Markdown
So, without further ado, the magic incantation:
###Code
import setup_colab
###Output
_____no_output_____
###Markdown
Required initial setup
###Code
!ls
!pip install -r drive/'My Drive'/'KoGPT2-FineTuning_pre'/requirements.txt
import os
import sys
sys.path.append('drive/My Drive/KoGPT2-FineTuning_pre')
logs_base_dir = "runs"
from jupyter_main_auto import main
ctx= 'cuda'
cachedir='~/kogpt2/'
load_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/checkpoint/KoGPT2_checkpoint_640000.tar' # path of the checkpoint to resume training from
save_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/checkpoint/' # directory where trained checkpoints are saved
data_file_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/dataset/dataset.csv' # path of the training dataset
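# note: these './gdrive/...' paths assume Google Drive is mounted at /content/gdrive
# (see drive.mount below)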
###Output
_____no_output_____
###Markdown
Start model training
###Code
# quick test that saving to Drive works
from google.colab import drive
drive.mount('/content/gdrive')
f = open(save_path + 'KoGPT2_checkpoint_' + str(142) + '.tar', 'w')
f.write("가자")
f.close()
main(load_path = load_path, data_file_path = data_file_path, save_path = './gdrive/My Drive/KoGPT2-FineTuning_pre/checkpoint/', summary_url = './gdrive/My Drive/KoGPT2-FineTuning_pre/runs/2020-07-20/', text_size = 500, new = 1, batch_size = 1)
###Output
_____no_output_____
###Markdown
Define model
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
# from x_transformers import *
from x_transformers import TransformerWrapper, Decoder
from x_transformers.autoregressive_wrapper import AutoregressiveWrapper, top_k, top_p, entmax, ENTMAX_ALPHA
from timm.models.vision_transformer import VisionTransformer
from timm.models.vision_transformer_hybrid import HybridEmbed
from timm.models.resnetv2 import ResNetV2
from timm.models.layers import StdConv2dSame
from einops import rearrange, repeat
class CustomARWrapper(AutoregressiveWrapper):
    def __init__(self, *args, **kwargs):
        super(CustomARWrapper, self).__init__(*args, **kwargs)

    @torch.no_grad()
    def generate(self, start_tokens, seq_len, eos_token=None, temperature=1., filter_logits_fn=top_k, filter_thres=0.9, **kwargs):
        # filtered-sampling loop adapted from x_transformers' AutoregressiveWrapper
        device = start_tokens.device
        was_training = self.net.training
        num_dims = len(start_tokens.shape)
        if num_dims == 1:
            start_tokens = start_tokens[None, :]
        b, t = start_tokens.shape
        self.net.eval()
        out = start_tokens
        mask = kwargs.pop('mask', None)
        if mask is None:
            mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
        for _ in range(seq_len):
            x = out[:, -self.max_seq_len:]
            mask = mask[:, -self.max_seq_len:]
            # print('arw:', out.shape)
            logits = self.net(x, mask=mask, **kwargs)[:, -1, :]
            if filter_logits_fn in {top_k, top_p}:
                filtered_logits = filter_logits_fn(logits, thres=filter_thres)
                probs = F.softmax(filtered_logits / temperature, dim=-1)
            elif filter_logits_fn is entmax:
                probs = entmax(logits / temperature, alpha=ENTMAX_ALPHA, dim=-1)
            sample = torch.multinomial(probs, 1)
            out = torch.cat((out, sample), dim=-1)
            mask = F.pad(mask, (0, 1), value=True)
            if eos_token is not None and (torch.cumsum(out == eos_token, 1)[:, -1] >= 1).all():
                break
        out = out[:, t:]
        if num_dims == 1:
            out = out.squeeze(0)
        self.net.train(was_training)
        return out
class CustomVisionTransformer(VisionTransformer):
    def __init__(self, img_size=224, patch_size=16, *args, **kwargs):
        super(CustomVisionTransformer, self).__init__(img_size=img_size, patch_size=patch_size, *args, **kwargs)
        self.height, self.width = img_size
        self.patch_size = patch_size

    def forward_features(self, x):
        print(np.shape(x))  # debug: log the input size
        B, c, h, w = x.shape
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        h, w = h // self.patch_size, w // self.patch_size
        # select the 2-D positional embeddings corresponding to this image's
        # (possibly smaller) patch grid within the maximum-size grid
        pos_emb_ind = repeat(torch.arange(h) * (self.width // self.patch_size - w), 'h -> (h w)', w=w) + torch.arange(h * w)
        pos_emb_ind = torch.cat((torch.zeros(1), pos_emb_ind + 1), dim=0).long()
        x += self.pos_embed[:, pos_emb_ind]
        # x = x + self.pos_embed
        x = self.pos_drop(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x
class Model(nn.Module):
    def __init__(self, encoder: CustomVisionTransformer, decoder: CustomARWrapper, args, temp: float = .333):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.bos_token = args.bos_token
        self.eos_token = args.eos_token
        self.max_seq_len = args.max_seq_len
        self.temperature = temp

    @torch.no_grad()
    def forward(self, x: torch.Tensor):
        print("forward", x)
        device = x.device
        encoded = self.encoder(x.to(device))
        dec = self.decoder.generate(torch.LongTensor([self.bos_token] * len(x))[:, None].to(device), self.max_seq_len,
                                    eos_token=self.eos_token, context=encoded, temperature=self.temperature)
        return dec
def get_model(args, training=False):
    backbone = ResNetV2(
        layers=args.backbone_layers, num_classes=0, global_pool='', in_chans=args.channels,
        preact=False, stem_type='same', conv_layer=StdConv2dSame)
    min_patch_size = 2 ** (len(args.backbone_layers) + 1)

    def embed_layer(**x):
        ps = x.pop('patch_size', min_patch_size)
        assert ps % min_patch_size == 0 and ps >= min_patch_size, 'patch_size needs to be multiple of %i with current backbone configuration' % min_patch_size
        return HybridEmbed(**x, patch_size=ps // min_patch_size, backbone=backbone)

    encoder = CustomVisionTransformer(img_size=(args.max_height, args.max_width),
                                      patch_size=args.patch_size,
                                      in_chans=args.channels,
                                      num_classes=0,
                                      embed_dim=args.dim,
                                      depth=args.encoder_depth,
                                      num_heads=args.heads,
                                      embed_layer=embed_layer
                                      ).to(args.device)
    decoder = CustomARWrapper(
        TransformerWrapper(
            num_tokens=args.num_tokens,
            max_seq_len=args.max_seq_len,
            attn_layers=Decoder(
                dim=args.dim,
                depth=args.num_layers,
                heads=args.heads,
                **args.decoder_args
            )),
        pad_value=args.pad_token
    ).to(args.device)
    model = Model(encoder, decoder, args)
    # if training:
    #     # check if largest batch can be handled by system
    #     im = torch.empty(args.batchsize, args.channels, args.max_height, args.min_height, device=args.device).float()
    #     seq = torch.randint(0, args.num_tokens, (args.batchsize, args.max_seq_len), device=args.device).long()
    #     decoder(seq, context=encoder(im)).sum().backward()
    #     model.zero_grad()
    #     torch.cuda.empty_cache()
    #     del im, seq
    return model
from torchsummary import summary
from dataset.dataset import test_transform
import cv2
import pandas.io.clipboard as clipboard
from PIL import ImageGrab
from PIL import Image
import os
import sys
import argparse
import logging
import yaml
import re
import numpy as np
import torch
from torchvision import transforms
from munch import Munch
from transformers import PreTrainedTokenizerFast
from timm.models.resnetv2 import ResNetV2
from timm.models.layers import StdConv2dSame
from dataset.latex2png import tex2pil
from models import get_model
from utils import *
last_pic = None
# if arguments is None:
# arguments = Munch({'config': 'settings/config.yaml', 'checkpoint': 'checkpoints/weights.pth', 'no_cuda': True, 'no_resize': False})
arguments = Munch({'epoch': 0, 'backbone_layers': [2, 3, 7], 'betas': [0.9, 0.999], 'batchsize': 10, 'bos_token': 1, 'channels': 1, 'data': 'dataset/data/train.pkl', 'debug': False, 'decoder_args': {'attn_on_attn': True, 'cross_attend': True, 'ff_glu': True, 'rel_pos_bias': False, 'use_scalenorm': False}, 'dim': 256, 'encoder_depth': 4, 'eos_token': 2, 'epochs': 10, 'gamma': 0.9995, 'heads': 8, 'id': None, 'load_chkpt': None, 'lr': 0.001, 'lr_step': 30, 'max_height': 192, 'max_seq_len': 512, 'max_width': 672, 'min_height': 32, 'min_width': 32, 'model_path': 'checkpoints', 'name': 'pix2tex', 'num_layers': 4, 'num_tokens': 8000, 'optimizer': 'Adam', 'output_path': 'outputs', 'pad': False, 'pad_token': 0, 'patch_size': 16, 'sample_freq': 3000, 'save_freq': 5, 'scheduler': 'StepLR', 'seed': 42, 'temperature': 0.2, 'test_samples': 5, 'testbatchsize': 20, 'tokenizer': 'dataset/tokenizer.json', 'valbatches': 100, 'valdata': 'dataset/data/val.pkl', 'wandb': False, 'device': 'cpu', 'max_dimensions': [672, 192], 'min_dimensions': [32, 32], 'out_path': 'checkpoints/pix2tex', 'config': 'settings/config.yaml', 'checkpoint': 'checkpoints/weights.pth', 'no_cuda': False, 'no_resize': False})
# logging.getLogger().setLevel(logging.FATAL)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
with open(arguments.config, 'r') as f:
    params = yaml.load(f, Loader=yaml.FullLoader)
args = parse_args(Munch(params))
args.update(**vars(arguments))
# args.device = "cpu"
args.device = 'cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu'
model = get_model(args)
# summary(model, (1, 1, 64, 352))
# model.load_state_dict(torch.load(args.checkpoint, map_location=args.device))
# summary(model, (1, 1, 64, 352))
# if 'image_resizer.pth' in os.listdir(os.path.dirname(args.checkpoint)) and not arguments.no_resize:
#     image_resizer = ResNetV2(layers=[2, 3, 3], num_classes=max(args.max_dimensions)//32, global_pool='avg', in_chans=1, drop_rate=.05,
#                              preact=True, stem_type='same', conv_layer=StdConv2dSame).to(args.device)
#     image_resizer.load_state_dict(torch.load(os.path.join(os.path.dirname(args.checkpoint), 'image_resizer.pth'), map_location=args.device))
#     image_resizer.eval()
# else:
#     image_resizer = None
image_resizer = None  # resizer loading is disabled above, but the variable is checked below
tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.tokenizer)  # needed by token2str below
# return args, model, image_resizer, tokenizer
from PIL import Image
img = Image.open("./dataset/sample/1000a29807.png")
# expected input size after preprocessing: torch.Size([1, 1, 64, 352])
encoder, decoder = model.encoder, model.decoder
if type(img) is bool:
    img = None
if img is None:
    if last_pic is None:
        print('Provide an image.')
    else:
        img = last_pic.copy()
else:
    last_pic = img.copy()
img = minmax_size(pad(img), args.max_dimensions, args.min_dimensions)
if image_resizer is not None and not args.no_resize:
    with torch.no_grad():
        input_image = img.convert('RGB').copy()
        r, w, h = 1, input_image.size[0], input_image.size[1]
        for _ in range(10):
            h = int(h * r)  # height to resize
            img = pad(minmax_size(input_image.resize((w, h), Image.BILINEAR if r > 1 else Image.LANCZOS), args.max_dimensions, args.min_dimensions))
            t = test_transform(image=np.array(img.convert('RGB')))['image'][:1].unsqueeze(0)
            w = (image_resizer(t.to(args.device)).argmax(-1).item() + 1) * 32
            logging.info(r, img.size, (w, int(input_image.size[1] * r)))
            if (w == img.size[0]):
                break
            r = w / img.size[0]
else:
    img = np.array(pad(img).convert('RGB'))
    t = test_transform(image=img)['image'][:1].unsqueeze(0)
im = t.to(args.device)
with torch.no_grad():
    model.eval()
    device = args.device
    encoded = encoder(im.to(device))
    dec = decoder.generate(torch.LongTensor([args.bos_token])[:, None].to(device), args.max_seq_len,
                           eos_token=args.eos_token, context=encoded.detach(), temperature=args.get('temperature', .25))
    pred = post_process(token2str(dec, tokenizer)[0])
try:
    clipboard.copy(pred)
except Exception:
    pass
pred
prediction = pred.replace('<', '\\lt ').replace('>', '\\gt ')
prediction
html = str('\\left\\{\\begin{array}{r c l}{{\\delta_{\\epsilon}B}}&{{\\sim}}&{{\\epsilon F\\,,}}\\\\ {{\\delta_{\\epsilon}F}}&{{\\sim}}&{{\\partial\\epsilon+\\epsilon B\\,,}}\\end{array}\\right.')
html
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
print(soup.get_text())
pageSource = """
<html>
<head><script id="MathJax-script" src="qrc:MathJax.js"></script>
<script>
MathJax.Hub.Config({messageStyle: 'none',tex2jax: {preview: 'none'}});
MathJax.Hub.Queue(
function () {
document.getElementById("equation").style.visibility = "";
}
);
</script>
</head> """ + """
<body>
<div id="equation" style="font-size:1em; visibility:hidden">$${equation}$$</div>
</body>
</html>
""".format(equation=prediction)
from IPython.core.display import display, HTML
display(HTML(pageSource))
###Output
_____no_output_____
###Markdown
Welcome to DESI High @ Google Colab!  An exciting adventure awaits you as you will soon have at your fingertips hot-off-the-telescope DESI data to run your own experiments. For you to create and save your own experiments, we'll run everything on Google Colab. This will leave you with a copy of any additions on your Google Drive. We'll need the starter script, downloaded from the DESI High repo:
###Code
! wget --no-clobber -O colab.py https://raw.githubusercontent.com/michaelJwilson/desihigh/main/colab.py
###Output
File ‘colab.py’ already there; not retrieving.
###Markdown
So, without further ado, the magic incantation:
###Code
import colab
###Output
_____no_output_____
###Markdown
###Code
import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
numpy.random.seed(42)
# load MNIST and flatten each 28x28 image into a 784-dimensional vector
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# scale pixel values to [0, 1]
X_train /= 255
X_test /= 255
# one-hot encode the digit labels
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Dense(800, input_dim=784, activation="relu",
kernel_initializer="normal"))
model.add(Dense(10, activation="softmax", kernel_initializer="normal"))
model.compile(loss="categorical_crossentropy", optimizer="SGD", metrics=["accuracy"])
print(model.summary())
model.fit(X_train, Y_train, batch_size=200, epochs=25, validation_split=0.2, verbose=2)
scores = model.evaluate(X_test, Y_test, verbose=0)
print("accuracy: %.2f%%" % (scores[1]*100))
###Output
_____no_output_____ |
source/notebooks/lecture25_intro_scikit-learn_cont.ipynb | ###Markdown
25) Intro to Scikit-Learn, Cont!

*This notebook contains excerpts from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the original content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*

Some other libraries available:
- [Scikit-Image](http://scikit-image.org/) for image analysis
- [Statsmodels](http://statsmodels.sourceforge.net/) for statistical modeling
- [AstroPy](http://astropy.org/) for astronomy and astrophysics
- [NiPy](http://nipy.org/) for neuro-imaging,

and many, many more... Start with the assumption that someone has done something similar to what you want, and search the internet for it!

Hyperparameters and Model Validation

Last time, we saw the basic recipe for applying a supervised machine learning model:

0. Assemble data into a features matrix and (if applicable) a target vector
1. Choose a class of model
2. Choose model hyperparameters
3. Fit the model to the training data
4. Use the model to predict labels for new data

The choice of model and choice of hyperparameters are perhaps the most important part of using these tools and techniques effectively. In order to make an informed choice, we need a way to *validate* that our model and our hyperparameters are a good fit to the data. While this may sound simple, there are some pitfalls that you must avoid to do this effectively.

Thinking about Model Validation

In principle, model validation is very simple: after choosing a model and its hyperparameters, we can estimate how effective it is by applying it to some of the training data and comparing the prediction to the known value. The following sections first show a naive approach to model validation and why it fails, before exploring the use of holdout sets and cross-validation for more robust model evaluation.

Model validation the wrong way

Let's demonstrate the naive approach to validation using the Iris data:
###Code
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
###Output
_____no_output_____
###Markdown
Next we choose a model and hyperparameters. Here we'll use a *k*-neighbors classifier with ``n_neighbors=1``. This is a very simple and intuitive model that says "the label of an unknown point is the same as the label of its closest training point." (FYI, the default n_neighbors=5, meaning that it will be classified based on the majority vote of its k=5 nearest neighbors).
###Code
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=1)
###Output
_____no_output_____
###Markdown
Then we train the model, and use it to predict labels for data we already know:
###Code
model.fit(X, y)
y_model = model.predict(X)
###Output
_____no_output_____
###Markdown
Finally, we compute the fraction of correctly labeled points:
###Code
from sklearn.metrics import accuracy_score
accuracy_score(y, y_model)
###Output
_____no_output_____
###Markdown
We see an accuracy score of 1.0, which indicates that 100% of points were correctly labeled by our model! But is this truly measuring the expected accuracy? Have we really come upon a model that we expect to be correct 100% of the time? As you may have gathered, the answer is no. In fact, this approach contains a fundamental flaw: it trains and evaluates the model on the same data. Furthermore, the nearest neighbor model is an instance-based estimator that simply stores the training data, and predicts labels by comparing new data to these stored points: except in contrived cases, it will get 100% accuracy every time!

Model validation the right way: Holdout sets

So what can be done? A better sense of a model's performance can be found using what's known as a *holdout set*: that is, we hold back some subset of the data from the training of the model, and then use this holdout set to check the model performance. This splitting can be done using the ``train_test_split`` utility in Scikit-Learn:
###Code
from sklearn.model_selection import train_test_split
# split the data with 50% in each set
X1, X2, y1, y2 = train_test_split(X, y, random_state=0,
test_size=0.5)
# fit the model on one set of data
model.fit(X1, y1)
# evaluate the model on the second set of data
y2_model = model.predict(X2)
accuracy_score(y2, y2_model)
###Output
_____no_output_____
###Markdown
We see here a more reasonable result: the nearest-neighbor classifier is about 90% accurate on this hold-out set. The hold-out set is similar to unknown data, because the model has not "seen" it before.

Model validation via cross-validation

One disadvantage of using a holdout set for model validation is that we have lost a portion of our data to the model training. In the above case, half the dataset does not contribute to the training of the model! This is not optimal, and can cause problems – especially if the initial set of training data is small. One way to address this is to use *cross-validation*; that is, to do a sequence of fits where each subset of the data is used both as a training set and as a validation set. Visually, it might look something like this:

Here we do two validation trials, alternately using each half of the data as a holdout set. Using the split data from before, we could implement it like this:
###Code
y2_model = model.fit(X1, y1).predict(X2)
y1_model = model.fit(X2, y2).predict(X1)
accuracy_score(y1, y1_model), accuracy_score(y2, y2_model)
###Output
_____no_output_____
###Markdown
What comes out are two accuracy scores, which we could combine (by, say, taking the mean) to get a better measure of the global model performance. This particular form of cross-validation is a *two-fold cross-validation*—that is, one in which we have split the data into two sets and used each in turn as a validation set. We could expand on this idea to use even more trials, and more folds in the data—for example, here is a visual depiction of five-fold cross-validation:

Here we split the data into five groups, and use each of them in turn to evaluate the model fit on the other 4/5 of the data. This would be rather tedious to do by hand, and so we can use Scikit-Learn's ``cross_val_score`` convenience routine to do it succinctly:
###Code
from sklearn.model_selection import cross_val_score
cross_val_score(model, X, y, cv=5)
###Output
_____no_output_____
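###Markdown
These schemes can also be driven by hand. As a minimal sketch (assuming the ``X``, ``y``, and ``model`` objects defined above), ``KFold`` is one such iterator: it yields index arrays for each train/validation split.
###Code
from sklearn.model_selection import KFold

kf = KFold(n_splits=5, shuffle=True, random_state=0)
scores = []
for train_index, val_index in kf.split(X):
    # fit on the training folds, score on the held-out fold
    model.fit(X[train_index], y[train_index])
    scores.append(model.score(X[val_index], y[val_index]))
scores
###Output
_____no_output_____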
###Markdown
Repeating the validation across different subsets of the data gives us an even better idea of the performance of the algorithm. Scikit-Learn implements a number of useful cross-validation schemes that are useful in particular situations; these are implemented via iterators in the ``model_selection`` module. See [the sklearn documentation on cross-validation](https://scikit-learn.org/stable/modules/cross_validation.html) for more about the different cross-validation methods available, including, for example, handling time-series data.

Selecting the Best Model

Now that we've seen the basics of validation and cross-validation, we will go into a little more depth regarding model selection and selection of hyperparameters. These issues are some of the most important aspects of the practice of machine learning, and this information is often glossed over in introductory machine learning tutorials. Of core importance is the following question: *if our estimator is underperforming, how should we move forward?* There are several possible answers:

- Use a more complicated/more flexible model
- Use a less complicated/less flexible model
- Gather more training samples
- Gather more data to add features to each sample

The answer to this question is often counter-intuitive. In particular, sometimes using a more complicated model will give worse results, and adding more training samples may not improve your results! The ability to determine what steps will improve your model is what separates the successful machine learning practitioners from the unsuccessful.

The Bias-variance trade-off

Fundamentally, the question of "the best model" is about finding a sweet spot in the tradeoff between *bias* and *variance*. Consider the following figure, which presents two regression fits to the same dataset:

It is clear that neither of these models is a particularly good fit to the data, but they fail in different ways. The model on the left attempts to find a straight-line fit through the data. Because the data are intrinsically more complicated than a straight line, the straight-line model will never be able to describe this dataset well. Such a model is said to *underfit* the data: that is, it does not have enough model flexibility to suitably account for all the features in the data; another way of saying this is that the model has high *bias*. The model on the right attempts to fit a high-order polynomial through the data. Here the model fit has enough flexibility to nearly perfectly account for the fine features in the data, but even though it very accurately describes the training data, its precise form seems to be more reflective of the particular noise properties of the data rather than the intrinsic properties of whatever process generated that data. Such a model is said to *overfit* the data: that is, it has so much model flexibility that the model ends up accounting for random errors as well as the underlying data distribution; another way of saying this is that the model has high *variance*.

To look at this in another light, consider what happens if we use these two models to predict the y-value for some new data. In the following diagrams, the red/lighter points indicate data that is omitted from the training set:

The score here is the $R^2$ score, or [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination), which measures how well a model performs relative to a simple mean of the target values. 
$R^2=1$ indicates a perfect match, $R^2=0$ indicates the model does no better than simply taking the mean of the data, and negative values mean even worse models. From the scores associated with these two models, we can make an observation that holds more generally:

- For high-bias models, the performance of the model on the validation set is similar to the performance on the training set.
- For high-variance models, the performance of the model on the validation set is far worse than the performance on the training set.

If we imagine that we have some ability to tune the model complexity, we would expect the training score and validation score to behave as illustrated in the following figure:

The diagram shown here is often called a *validation curve*, and we see the following essential features:

- The training score is everywhere higher than the validation score. This is generally the case: the model will be a better fit to data it has seen than to data it has not seen.
- For very low model complexity (a high-bias model), the training data is under-fit, which means that the model is a poor predictor both for the training data and for any previously unseen data.
- For very high model complexity (a high-variance model), the training data is over-fit, which means that the model predicts the training data very well, but fails for any previously unseen data.
- For some intermediate value, the validation curve has a maximum. This level of complexity indicates a suitable trade-off between bias and variance.

The means of tuning the model complexity varies from model to model; [this book](https://jakevdp.github.io/PythonDataScienceHandbook/) discusses some individual models in depth in later sections of chapter 5, and discusses how each model allows for such tuning.

Validation curves in Scikit-Learn

Let's look at an example of using cross-validation to compute the validation curve for a class of models. Here we will use a *polynomial regression* model: this is a generalized linear model in which the degree of the polynomial is a tunable parameter. For example, a degree-1 polynomial fits a straight line to the data; for model parameters $a$ and $b$:

$$y = ax + b$$

A degree-3 polynomial fits a cubic curve to the data; for model parameters $a, b, c, d$:

$$y = ax^3 + bx^2 + cx + d$$

We can generalize this to any number of polynomial features. In Scikit-Learn, we can implement this with a simple linear regression combined with the polynomial preprocessor. We will use a *pipeline* to string these operations together:
###Code
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2, **kwargs):
    return make_pipeline(PolynomialFeatures(degree),
                         LinearRegression(**kwargs))
###Output
_____no_output_____
###Markdown
Now let's create some data to which we will fit our model:
###Code
import numpy as np
def make_data(N, err=1.0, rseed=1):
    # randomly sample the data
    rng = np.random.RandomState(rseed)
    X = rng.rand(N, 1) ** 2
    y = 10 - 1. / (X.ravel() + 0.1)
    if err > 0:
        y += err * rng.randn(N)
    return X, y
X, y = make_data(40)
###Output
_____no_output_____
###Markdown
We can now visualize our data, along with polynomial fits of several degrees:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn; seaborn.set() # plot formatting
# trick to make our Nx1 X matrix
X_test = np.linspace(-0.1, 1.1, 500)[:, None]
print(X_test.shape)
# ravel() returns a flattened array
plt.scatter(X.ravel(), y, color='black')
axis = plt.axis()
for degree in [1, 3, 5]:
    y_test = PolynomialRegression(degree).fit(X, y).predict(X_test)
    plt.plot(X_test.ravel(), y_test, label='degree={0}'.format(degree))
plt.xlim(-0.1, 1.0)
plt.ylim(-2, 12)
plt.legend(loc='best');
###Output
_____no_output_____
###Markdown
The knob controlling model complexity in this case is the degree of the polynomial, which can be any non-negative integer. A useful question to answer is this: what degree of polynomial provides a suitable trade-off between bias (under-fitting) and variance (over-fitting)?

We can make progress in this by visualizing the validation curve for this particular data and model; this can be done straightforwardly using the ``validation_curve`` convenience routine provided by Scikit-Learn. Given a model, data, parameter name, and a range to explore, this function will automatically compute both the training score and validation score across the range:
###Code
from sklearn.model_selection import validation_curve
degree = np.arange(0, 21)
#cv stands for cross-validation; the integer given is the K in Kfold cv
train_score, val_score = validation_curve(PolynomialRegression(), X, y,
'polynomialfeatures__degree', degree, cv=7)
plt.plot(degree, np.median(train_score, 1), color='blue', label='training score')
plt.plot(degree, np.median(val_score, 1), color='red', label='validation score')
plt.legend(loc='best')
plt.ylim(0, 1)
plt.xlabel('degree')
plt.ylabel('score')
plt.xticks(np.arange(0, 21, step=4));
###Output
_____no_output_____
###Markdown
This shows precisely the qualitative behavior we expect: the training score is everywhere higher than the validation score; the training score is monotonically improving with increased model complexity; and the validation score reaches a maximum before dropping off as the model becomes over-fit. From the validation curve, we can read off that the optimal trade-off between bias and variance is found for a third-order polynomial; we can compute and display this fit over the original data as follows:
###Code
plt.scatter(X.ravel(), y)
lim = plt.axis()
y_test = PolynomialRegression(3).fit(X, y).predict(X_test)
plt.plot(X_test.ravel(), y_test);
plt.axis(lim);
###Output
_____no_output_____
###Markdown
Notice that finding this optimal model did not actually require us to compute the training score, but examining the relationship between the training score and validation score can give us useful insight into the performance of the model.

Learning Curves

One important aspect of model complexity is that the optimal model will generally depend on the size of your training data. For example, let's generate a new dataset with a factor of five more points:
###Code
X2, y2 = make_data(200)
plt.scatter(X2.ravel(), y2);
###Output
_____no_output_____
###Markdown
We will duplicate the preceding code to plot the validation curve for this larger dataset; for reference let's over-plot the previous results as well:
###Code
degree = np.arange(21)
train_score2, val_score2 = validation_curve(PolynomialRegression(), X2, y2,
'polynomialfeatures__degree', degree, cv=7)
plt.plot(degree, np.median(train_score2, 1), color='blue', label='training score')
plt.plot(degree, np.median(val_score2, 1), color='red', label='validation score')
plt.plot(degree, np.median(train_score, 1), color='blue', alpha=0.3, linestyle='dashed')
plt.plot(degree, np.median(val_score, 1), color='red', alpha=0.3, linestyle='dashed')
plt.legend(loc='lower center')
plt.ylim(0, 1)
plt.xlabel('degree')
plt.ylabel('score')
plt.xticks(np.arange(0, 21, step=4));
###Output
_____no_output_____
###Markdown
The solid lines show the new results, while the fainter dashed lines show the results of the previous smaller dataset. It is clear from the validation curve that the larger dataset can support a much more complicated model: the peak here is probably around a degree of 6, but even a degree-20 model is not seriously over-fitting the data—the validation and training scores remain very close.

Thus we see that the behavior of the validation curve has not one but **two important inputs: the model complexity and the number of training points.** It is often useful to explore the behavior of the model as a function of the number of training points, which we can do by using increasingly larger subsets of the data to fit our model. A plot of the training/validation score with respect to the size of the training set is known as a *learning curve.*

The general behavior we would expect from a learning curve is this:

- A model of a given complexity will *overfit* a small dataset: this means the training score will be relatively high, while the validation score will be relatively low.
- A model of a given complexity will *underfit* a large dataset: this means that the training score will decrease, but the validation score will increase.
- A model will never, except by chance, give a better score to the validation set than the training set: this means the curves should keep getting closer together but never cross.

With these features in mind, we would expect a learning curve to look qualitatively like that shown in the following figure:

The notable feature of the learning curve is the convergence to a particular score as the number of training samples grows. In particular, once you have enough points that a particular model has converged, *adding more training data will not help you!* The only way to increase model performance in this case is to use another (often more complex) model.

Learning curves in Scikit-Learn

Scikit-Learn offers a convenient utility for computing such learning curves from your models; here we will compute a learning curve for our original dataset with a second-order polynomial model and a ninth-order polynomial:
###Code
from sklearn.model_selection import learning_curve
fig, ax = plt.subplots(1, 2, figsize=(16, 6))
fig.subplots_adjust(left=0.0625, right=0.95, wspace=0.1)
for i, degree in enumerate([2, 9]):
    N, train_lc, val_lc = learning_curve(PolynomialRegression(degree),
                                         X, y, cv=7,
                                         train_sizes=np.linspace(0.3, 1, 25))
    ax[i].plot(N, np.mean(train_lc, 1), color='blue', label='training score')
    ax[i].plot(N, np.mean(val_lc, 1), color='red', label='validation score')
    ax[i].hlines(np.mean([train_lc[-1], val_lc[-1]]), N[0], N[-1],
                 color='gray', linestyle='dashed')
    ax[i].set_ylim(0, 1)
    ax[i].set_xlim(N[0], N[-1])
    ax[i].set_xlabel('training size')
    ax[i].set_ylabel('score')
    ax[i].set_title('degree = {0}'.format(degree), size=14)
    ax[i].legend(loc='best')
###Output
_____no_output_____
###Markdown
This is a valuable diagnostic, because it gives us a visual depiction of how our model responds to increasing training data. In particular, when your learning curve has already converged (i.e., when the training and validation curves are already close to each other) *adding more training data will not significantly improve the fit!* This situation is seen in the left panel, with the learning curve for the degree-2 model. The only way to increase the converged score is to use a different (usually more complicated) model. We see this in the right panel: by moving to a much more complicated model, we increase the score of convergence (indicated by the dashed line), but at the expense of higher model variance (indicated by the difference between the training and validation scores). If we were to add even more data points, the learning curve for the more complicated model would eventually converge. Plotting a learning curve for your particular choice of model and dataset can help you to make this type of decision about how to move forward in improving your analysis.

Validation in Practice: Grid Search

The preceding discussion is meant to give you some intuition into the trade-off between bias and variance, and its dependence on model complexity and training set size. In practice, models generally have more than one knob to turn, and thus plots of validation and learning curves change from lines to multi-dimensional surfaces. In these cases, such visualizations are difficult and we would rather simply find the particular model that maximizes the validation score. Scikit-Learn provides automated tools to do this in the grid search module.

Here is an example of using grid search to find the optimal polynomial model. We will explore a three-dimensional grid of model features; namely the polynomial degree, the flag telling us whether to fit the intercept, and the flag telling us whether to normalize the problem. This can be set up using Scikit-Learn's ``GridSearchCV`` meta-estimator:
###Code
from sklearn.model_selection import GridSearchCV
param_grid = {'polynomialfeatures__degree': np.arange(21),
'linearregression__fit_intercept': [True, False],
'linearregression__normalize': [True, False]}
grid = GridSearchCV(PolynomialRegression(), param_grid, cv=7, iid=False)
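# with cv=7, each of the 21 * 2 * 2 = 84 parameter combinations is fit 7 times
# (588 fits in total, plus a final refit on the best parameters)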
###Output
_____no_output_____
###Markdown
Notice that like a normal estimator, this has not yet been applied to any data. Calling the ``fit()`` method will fit the model at each grid point, keeping track of the scores along the way:
###Code
grid.fit(X, y);
###Output
_____no_output_____
###Markdown
Now that this is fit, we can ask for the best parameters as follows:
###Code
grid.best_params_
###Output
_____no_output_____
###Markdown
Finally, if we wish, we can use the best model and show the fit to our data using code from before:
###Code
model = grid.best_estimator_
plt.scatter(X.ravel(), y)
lim = plt.axis()
y_test = model.fit(X, y).predict(X_test)
plt.plot(X_test.ravel(), y_test);
plt.axis(lim);
###Output
_____no_output_____ |
APPETIZER/APP_CONCATENATE_DATAFRAMES.ipynb | ###Markdown
Appetizers

* ***Main***
###Code
import pandas as pd

df = pd.read_csv('APP_main.csv')
df1 = pd.read_csv('APP_main_1.csv')
df2 = pd.read_csv('APP_main_2.csv')
df3 = pd.read_csv('APP_main_3.csv')
print df1.shape
df3.head()
df = df.drop('Unnamed: 0', 1)
print df1.shape
df1 = df1.drop('Unnamed: 0', 1)
df1.head(3)
print df2.shape
df2 = df2.drop('Unnamed: 0', 1)
df2.head(3)
print df3.shape
df3 = df3.drop('Unnamed: 0', 1)
df3.head(3)
#concatenate the main tables.
APP_main= pd.concat([df, df1, df2, df3])
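# note: pd.concat keeps each frame's original index; pass ignore_index=True
# for a fresh 0..n-1 index on the combined frame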
#create a new dataframe with selected columns
APP_main_reduced = APP_main.drop(['recipeName', 'sourceDisplayName'], axis = 1)
#peek at dataframe
print APP_main.shape
APP_main.id.duplicated().sum()
for i in APP_main.duplicated('id'):
    if i == True:
        print i
###Output
_____no_output_____
###Markdown
* ***Flavors***
###Code
fdf = pd.read_csv('APP_flavors.csv')
fdf1 = pd.read_csv('APP_flavors_1.csv')
fdf2 = pd.read_csv('APP_flavors_2.csv')
fdf3 = pd.read_csv('APP_flavors_3.csv')
print fdf.shape
fdf = fdf.drop('Unnamed: 0', 1)
fdf = fdf.rename(columns = {'index':'id'})
fdf.head(3)
print fdf1.shape
fdf1 = fdf1.drop('Unnamed: 0', 1)
fdf1.head(3)
#change column name index to id
fdf1=fdf1.rename(columns= {'index': 'id'})
fdf1.columns
print fdf2.shape
fdf2 = fdf2.drop('Unnamed: 0', 1)
fdf2 = fdf2.rename(columns = {'index':'id'})
fdf2.head(3)
print fdf3.shape
fdf3 = fdf3.drop('Unnamed: 0', 1)
fdf3 = fdf3.rename(columns = {'index':'id'})
fdf3.head(3)
#concatenate the flavor tables.
APP_flavors= pd.concat([fdf, fdf1, fdf2, fdf3])
#peek at dataframe
print APP_flavors.shape
APP_flavors.head(3)
for i in APP_flavors.duplicated('id'):
if i == True:
print i
###Output
_____no_output_____
###Markdown
* ***Cuisine***
###Code
cdf = pd.read_csv('APP_cuisines.csv')
cdf1 = pd.read_csv('APP_cuisines_1.csv')
cdf2 = pd.read_csv('APP_cuisines_2.csv')
cdf3 = pd.read_csv('APP_cuisines_3.csv')
print cdf.shape
cdf = cdf.drop('Unnamed: 0', 1)
cdf = cdf.rename(columns = {'index':'id'})
print cdf.columns
cdf.head(3)
print cdf1.shape
cdf1 = cdf1.drop('Unnamed: 0', 1)
print cdf1.columns
cdf1.head(3)
#change column name index to id
cdf1=cdf1.rename(columns= {'index': 'id'})
cdf1.columns
print cdf2.shape
cdf2 = cdf2.drop('Unnamed: 0', 1)
cdf2 = cdf2.rename(columns = {'index':'id'})
print cdf2.columns
cdf2.head(3)
print cdf3.shape
cdf3 = cdf3.drop('Unnamed: 0', 1)
cdf3 = cdf3.rename(columns = {'index':'id'})
print cdf3.columns
cdf3.head(3)
#concatenate the cuisine tables.
APP_cuisines= pd.concat([cdf, cdf1, cdf2, cdf3])
#peek at dataframe
print APP_cuisines.shape
APP_cuisines.head(3)
for i in APP_cuisines.duplicated('id'):
if i == True:
print i
###Output
_____no_output_____
###Markdown
* ***Details***
###Code
ddf = pd.read_csv('APP_details.csv')
ddf1 = pd.read_csv('APP_details_1.csv')
ddf2 = pd.read_csv('APP_details_2.csv')
ddf3 = pd.read_csv('APP_details_3.csv')
print ddf.shape
ddf = ddf.drop('Unnamed: 0', 1)
print ddf.columns
ddf.head(3)
print ddf1.shape
ddf1 = ddf1.drop('Unnamed: 0', 1)
print ddf1.columns
ddf1.head(3)
print ddf2.shape
ddf2 = ddf2.drop('Unnamed: 0', 1)
print ddf2.columns
ddf2.head(3)
print ddf3.shape
ddf3 = ddf3.drop('Unnamed: 0', 1)
print ddf3.columns
ddf3.head(3)
#concatenate the details tables.
APP_details= pd.concat([ddf, ddf1, ddf2, ddf3])
#peek at dataframe
print APP_details.shape
APP_details.head(3)
for i in APP_details.duplicated('id'):
if i == True:
print i
###Output
_____no_output_____
###Markdown
* ***Ingredients***
###Code
idf = pd.read_csv('APP_ingredients.csv')
idf1 = pd.read_csv('APP_ingredients_1.csv')
idf2 = pd.read_csv('APP_ingredients_2.csv')
idf3 = pd.read_csv('APP_ingredients_3.csv')
print idf.shape
idf = idf.drop('Unnamed: 0', 1)
print idf.columns
idf.head(3)
print idf1.shape
idf1 = idf1.drop('Unnamed: 0', 1)
print idf1.columns
idf1.head(3)
print idf2.shape
idf2 = idf2.drop('Unnamed: 0', 1)
print idf2.columns
idf2.head(3)
print idf3.shape
idf3 = idf3.drop('Unnamed: 0', 1)
print idf3.columns
idf3.head()
#concatenate the ingredient tables.
APP_ing= pd.concat([idf, idf1, idf2, idf3])
#create a new dataframe with selected columns
APP_ing_reduced = APP_ing[['id', 'ingredient_list']]
#drop unnamed column & make id first column
#APP_ing = APP_ing.drop('Unnamed: 0', 1)
cols = list(APP_ing)
cols.insert(0, cols.pop(cols.index('id')))
APP_ing = APP_ing.loc[:, cols] # .loc replaces the deprecated .ix indexer; cols are column labels
APP_ing.head(3)
for i in APP_ing.duplicated('id'):
if i == True:
print i
###Output
_____no_output_____
###Markdown
Join all tables for Appetizers
###Code
# set index to column 'id'
_df = [APP_main, APP_main_reduced, APP_cuisines, APP_flavors, APP_details, APP_ing, APP_ing_reduced]
for df in _df:
df.set_index('id', inplace = True)
# join dataframes
APP_data = APP_main.join([APP_cuisines, APP_flavors, APP_details, APP_ing])
APP_data_reduced = APP_main_reduced.join([APP_flavors, APP_details, APP_ing_reduced])
# add course column
APP_data['course'] = 'Appetizer'
APP_data_reduced['course'] = 'Appetizer'
APP_data.shape
APP_data.head(3)
#save to csv
APP_data.to_csv('APP_data.csv')
APP_data_reduced.to_csv('APP_data_reduced.csv')
###Output
_____no_output_____ |
tutorial/proofterms.ipynb | ###Markdown
$\newcommand{\To}{\Rightarrow}$$\newcommand{\false}{\mathrm{false}}$
###Code
import os
os.chdir('..')
from kernel.type import BoolType, NatType, TFun
from kernel.term import Var, And, Implies, Inst, NatVars, Eq
from kernel.proof import Proof
from kernel.report import ProofReport
from kernel import theory
from logic import basic
from logic import matcher
from kernel.proofterm import ProofTerm
from data import nat
from syntax.settings import settings
basic.load_theory('nat')
settings.unicode = True
###Output
_____no_output_____
###Markdown
Proofs and proof-checking In the previous two sections, we showed how to prove theorems about equality and propositional logic. Let us review what we have done so far. For each statement to be proved, we first wrote down the proof step-by-step in a semi-formal language, where each step consists of applying a primitive deduction rule, or some composite rule (such as apply_theorem). This proof is then checked by converting the semi-formal proof into Python code. In the Python code, we are careful to construct theorems using only a limited set of functions: `thy.get_theorem` or one of the primitive deduction rules (`Thm.assume`, `Thm.implies_intr`, etc.). For the composite rule, we defined corresponding Python functions, which again construct theorems using only the limited set of functions. In this way, as long as we keep to calling only the limited set of functions, either directly or indirectly through custom procedures, and if the limited set of functions correctly implements the primitive deduction rules of higher-order logic, we can trust that our proof is correct. While this already substantially increases our confidence in the proofs, we can still do better. First, it is difficult to ensure that the theorem objects are constructed using only the limited set of functions. This gets more difficult as the size of the code increases, and when multiple teams are collaborating on a project. Second, we cannot completely trust our implementation of the primitive deduction rules. Ideally, we should allow programs written by others to check our proof. Both of these problems can be solved by storing a trace of the proof, so it can be checked later, either using the same program or using other programs. There are two kinds of traces that we will consider, which we call the linear proof and the proof term. A linear proof (class `Proof`) is a list of proof items (class `ProofItem`). Each proof item contains an identifier, the deduction rule used, arguments to the deduction rule, and input sequents (referred to by the identifiers of the corresponding proof items). Let us consider a simple example, the proof of $A \to A$. Recall the proof is as follows: 0. $A \vdash A$ by assume A. 1. $\vdash A \to A$ by implies_intr A from 0. The corresponding linear proof is:
###Code
A = Var("A", BoolType)
prf = Proof()
prf.add_item(0, "assume", args=A)
prf.add_item(1, "implies_intr", args=A, prevs=[0])
print(prf)
###Output
0: assume A
1: implies_intr A from 0
###Markdown
This proof can be *checked* using the `check_proof` method. This method takes a proof as input, and checks the proof in the context of the current theory. If the check succeeds, it returns the theorem obtained by the proof.
###Code
res = theory.check_proof(prf)
print(res)
###Output
⊢ A ⟶ A
###Markdown
Checking the proof also records the sequent obtained at each line of the proof. These will be displayed the next time the proof is printed:
###Code
print(prf)
###Output
0: A ⊢ A by assume A
1: ⊢ A ⟶ A by implies_intr A from 0
###Markdown
Proof-checking will uncover any mistakes in the proof, including application of proof rules on inputs that are invalid. For example:
###Code
B = Var("B", BoolType)
C = Var("C", BoolType)
prf2 = Proof()
prf2.add_item(0, "assume", args=Implies(A,B))
prf2.add_item(1, "assume", args=C)
prf2.add_item(2, "implies_elim", prevs=[0,1])
theory.check_proof(prf2) # raises CheckProofException
###Output
_____no_output_____
###Markdown
Proof checking takes an optional *proof report*, which records statistics from the proof. Continuing from the first example for which proof-checking is successful, we have:
###Code
rpt = ProofReport()
theory.check_proof(prf, rpt=rpt)
print(rpt)
###Output
Steps: 2
Theorems: 0
Primitive: 2
Macro: 0
Theorems applied:
Macros evaluated:
Macros expanded:
Gaps: []
###Markdown
The report says the proof consists of 2 primitive steps. Otherwise it is not very interesting. We will see more features of the proof report later using more complicated examples. Proof terms While linear proofs are intuitive and can be printed in an easily readable form, they are difficult to generate automatically. When generating proofs, we prefer a pattern where each proved result is considered an object, and new results are produced by combining existing proved results in any order. In this way, the produced proof resembles a tree: at the root is the final result of the proof. At each node is an intermediate statement, and the edges characterize dependency between intermediate statements. Such trees correspond to `ProofTerm` objects in Python. The proof term for the theorem $A \to A$ is constructed as follows:
###Code
pt0 = ProofTerm.assume(A)
pt1 = pt0.implies_intr(A)
print(pt1)
###Output
ProofTerm(⊢ A ⟶ A)
###Markdown
Note the similarity with constructing theorems in previous sections. In general, a proof term can be considered as a theorem with extra information: the full history of how the theorem is derived. Any proof term has a field `th`, which is the theorem obtained by the proof. Any proof term can be converted to a linear proof using the `export` method, which can be used for proof checking or display.
###Code
prf = pt1.export()
theory.check_proof(prf)
print(prf)
###Output
0: A ⊢ A by assume A
1: ⊢ A ⟶ A by implies_intr A from 0
###Markdown
Existing theorems can be invoked using the `ProofTerm.theorem` function. We give an example on substitution of identities:
###Code
a = Var("a", NatType)
b = Var("b", NatType)
pt0 = ProofTerm.theorem('add_assoc')
pt1 = pt0.substitution(x=a, y=b, z=nat.one)
print(pt1)
###Output
ProofTerm(⊢ a + b + 1 = a + (b + 1))
###Markdown
Again, the proof can be checked and printed as follows:
###Code
prf = pt1.export()
theory.check_proof(prf)
print(prf)
###Output
0: ⊢ ?x + ?y + ?z = ?x + (?y + ?z) by theorem add_assoc
1: ⊢ a + b + 1 = a + (b + 1) by substitution {x: a, y: b, z: (1::nat)} from 0
###Markdown
We can also see the report from proof checking:
###Code
rpt = ProofReport()
theory.check_proof(prf, rpt)
print(rpt)
###Output
Steps: 2
Theorems: 1
Primitive: 1
Macro: 0
Theorems applied: add_assoc
Macros evaluated:
Macros expanded:
Gaps: []
###Markdown
This report is slightly more interesting. It states that the proof consists of one invocation of an existing theorem, and one primitive step (`substitution`). The only theorem applied in the proof is `add_assoc`. All of the methods for constructing `Thm` objects have their corresponding methods for proof terms. For convenience, many of the methods for proof terms can be invoked by a `ProofTerm` object (rather than through the `ProofTerm` class). Note that a `ProofTerm` object is immutable - the methods return new proof terms rather than modifying the existing ones. For example, methods dealing with equalities are:
###Code
x, y, z = NatVars('x y z')
f = Var('f', TFun(NatType, BoolType))
pt = ProofTerm.assume(Eq(x, y))
pt2 = ProofTerm.assume(Eq(y, z))
print(ProofTerm.reflexive(x))
print(pt.symmetric())
print(pt.transitive(pt2))
print(ProofTerm.reflexive(f).combination(pt))
###Output
ProofTerm(⊢ x = x)
ProofTerm(x = y ⊢ y = x)
ProofTerm(x = y, y = z ⊢ x = z)
ProofTerm(x = y ⊢ f x ⟷ f y)
###Markdown
Function producing proof terms Just as for theorems, we can write our own functions for producing proof terms. For example, we can write a new `apply_theorem` function, this time operating on proof terms:
###Code
def apply_theorem(th_name, *args, inst=None):
pt = ProofTerm.theorem(th_name)
As, _ = pt.prop.strip_implies() # list of assumptions of th
if inst is None:
inst = Inst() # initial (empty) instantiation
for A, arg in zip(As, args): # match each assumption with corresponding arg
inst = matcher.first_order_match(A, arg.prop, inst)
pt = pt.substitution(inst)
for arg in args: # perform implies_elim on th
pt = pt.implies_elim(arg)
return pt
###Output
_____no_output_____
###Markdown
We first test this function on a simple example:
###Code
ptA = ProofTerm.assume(A)
ptB = ProofTerm.assume(B)
ptAB = apply_theorem('conjI', ptA, ptB)
prf = ptAB.export()
theory.check_proof(prf)
print(prf)
###Output
0: ⊢ ?A ⟶ ?B ⟶ ?A ∧ ?B by theorem conjI
1: ⊢ A ⟶ B ⟶ A ∧ B by substitution {A: A, B: B} from 0
2: A ⊢ A by assume A
3: A ⊢ B ⟶ A ∧ B by implies_elim from 1, 2
4: B ⊢ B by assume B
5: A, B ⊢ A ∧ B by implies_elim from 3, 4
###Markdown
The final theorem is as expected. We can now reproduce the full proof of $A \wedge B \to B \wedge A$:
###Code
pt0 = ProofTerm.assume(And(A, B))
pt1 = apply_theorem('conjD1', pt0)
pt2 = apply_theorem('conjD2', pt0)
pt3 = apply_theorem('conjI', pt2, pt1)
pt4 = pt3.implies_intr(And(A, B))
prf = pt4.export()
theory.check_proof(prf)
print(prf)
###Output
0: ⊢ ?A ⟶ ?B ⟶ ?A ∧ ?B by theorem conjI
1: ⊢ B ⟶ A ⟶ B ∧ A by substitution {A: B, B: A} from 0
2: ⊢ ?A ∧ ?B ⟶ ?B by theorem conjD2
3: ⊢ A ∧ B ⟶ B by substitution {A: A, B: B} from 2
4: A ∧ B ⊢ A ∧ B by assume A ∧ B
5: A ∧ B ⊢ B by implies_elim from 3, 4
6: A ∧ B ⊢ A ⟶ B ∧ A by implies_elim from 1, 5
7: ⊢ ?A ∧ ?B ⟶ ?A by theorem conjD1
8: ⊢ A ∧ B ⟶ A by substitution {A: A, B: B} from 7
9: A ∧ B ⊢ A by implies_elim from 8, 4
10: A ∧ B ⊢ B ∧ A by implies_elim from 6, 9
11: ⊢ A ∧ B ⟶ B ∧ A by implies_intr A ∧ B from 10
###Markdown
We can also view the report from checking the proof:
###Code
rpt = ProofReport()
theory.check_proof(prf, rpt)
print(rpt)
###Output
Steps: 12
Theorems: 3
Primitive: 9
Macro: 0
Theorems applied: conjD1, conjD2, conjI
Macros evaluated:
Macros expanded:
Gaps: []
|
Project/.ipynb_checkpoints/TESTING-checkpoint.ipynb | ###Markdown
Explanation of the testing process
###Code
# Imports needed by this walkthrough (the trained `model` and the MNIST `x_train` array are assumed to be defined in earlier cells of the full notebook)
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

img=cv2.imread('test_sample/model_testing_#1.png') #reading the image input
plt.imshow(img)
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #converting the image to gray scale
plt.imshow(gray)
resized = cv2.resize(gray,(28,28),interpolation=cv2.INTER_AREA) #resizing our image for using convolution.
plt.imshow(resized)
newimg=tf.keras.utils.normalize(resized) #normalizing the data.
plt.imshow(newimg)
newimg=np.array(newimg).reshape(-1,28,28,1) #reshaping to the (batch, height, width, channels) input expected by the convolution layer.
newimg.shape #displaying the shape of the resulting image.
predictions=model.predict(newimg)
print("models prediction is:",np.argmax(predictions)) #displaying the model's prediction for our image.
###Output
models prediction is: 1
###Markdown
Checking the input image and predicting the result [TESTING]
###Code
#importing the function from python file for testing.
from testing_function import get_prediction
###Output
_____no_output_____
###Markdown
>Testing on MNIST database images: Test_Case 1
###Code
image=x_train[59001]
get_prediction(image)
###Output
_____no_output_____
###Markdown
Test_Case 2
###Code
image=x_train[1]
get_prediction(image)
###Output
_____no_output_____
###Markdown
Test_Case 3
###Code
image=x_train[128]
get_prediction(image)
###Output
_____no_output_____
###Markdown
>Testing on real life images: Test_Case 1
###Code
#predicting the digit from our model
image='test_sample/model_testing_#6.jpg'
get_prediction(image)
###Output
_____no_output_____
###Markdown
Test_Case 2
###Code
#predicting the digit from our model
image='test_sample/model_testing_#8.png'
get_prediction(image)
###Output
_____no_output_____
###Markdown
Test_Case 3
###Code
#predicting the digit from our model
image='test_sample/model_testing_#2.jpg'
get_prediction(image)
###Output
_____no_output_____ |
introduction-to-programming-with-python/assignments/cscie7_assignment_08.ipynb | ###Markdown
Homework 8 2021 Fill in your name
###Code
first_name = "PREESA"
last_name = "SAEWONG"
nick_name = "REE"
assert(len(first_name) != 0)
assert(len(last_name) != 0)
assert(len(nick_name) != 0)
###Output
_____no_output_____
###Markdown
Problem 1: Secret Code We looked at ROT-13, a simple substitution cipher, in Office Hours last week. This week we will write another cipher: ```text Plain: abcdefghijklmnopqrstuvwxyz Cipher: zyxwvutsrqponmlkjihgfedcba ``` To hide our tracks, when we encode, we split the string into blocks of 5 characters. See the Unit Tests for examples. Write functions encode and decode that take a string and return an encoded or decoded string.
###Code
import string
alpha_low = string.ascii_lowercase
alpha_low_rev = alpha_low[::-1]
def encode(plain_text: str) -> str:
## Turn the entire string lower case
plain_text = plain_text.lower()
## Remove spaces and punctuations in the string
plain_text = "".join([ch for ch in plain_text if (not ch.isspace()) and (not ch in string.punctuation)])
## Map each letter with the encoded letter and keep numbers as are
cipher = "".join([alpha_low_rev[alpha_low.index(ch)] if (not ch.isdigit()) else (ch) for ch in plain_text])
## Split every 5 characters in the string and join it with a space
cipher = " ".join([cipher[i:i+5] for i in range(0, len(cipher), 5)])
return cipher
def decode(ciphered_text: str) -> str:
## Remove spaces in the string
ciphered_text = "".join([ch for ch in ciphered_text if (not ch.isspace())])
## Map each letter with the encoded letter and keep numbers as are
plain_text = "".join([alpha_low[alpha_low_rev.index(ch)] if (not ch.isdigit()) else (ch) for ch in ciphered_text])
return plain_text
###Output
_____no_output_____
###Markdown
Unit Tests
###Code
def test_code():
assert encode("no") == "ml"
assert encode("yes") == "bvh"
assert encode("OMG") == "lnt"
assert encode("O M G") == "lnt"
assert encode("mindblowingly") == "nrmwy oldrm tob"
assert encode("Testing, 1 2 3, testing.") == "gvhgr mt123 gvhgr mt"
assert encode("Truth is fiction.") == "gifgs rhurx grlm"
plaintext = "The quick brown fox jumps over the lazy dog."
ciphertext = "gsvjf rxpyi ldmul cqfnk hlevi gsvoz abwlt"
assert encode(plaintext) == ciphertext
assert decode("zmlyh") == "anobs"
assert decode("zmlyh gzxov rhlug vmzhg vkkrm thglm v") == "anobstacleisoftenasteppingstone"
assert decode("gvhgr mt123 gvhgr mt") == "testing123testing"
ciphertext = "gsvjf rxpyi ldmul cqfnk hlevi gsvoz abwlt"
plaintext = "thequickbrownfoxjumpsoverthelazydog"
assert decode(ciphertext) == plaintext
ciphertext = "zmlyhgzxovrhlugvmzhgvkkrmthglmv"
plaintext = "anobstacleisoftenasteppingstone"
assert decode(ciphertext) == plaintext
assert decode(encode("Testing, 1 2 3, testing.")) == "testing123testing"
print('Success!')
test_code()
###Output
Success!
###Markdown
Problem 2: Dates Fill in the definition of the three methods below for a class Date
###Code
class Date(object):
"Represent a calendar date"
def __init__(self, day=0, month=0, year=0):
"""Initialize"""
self.day = day
self.month = month
self.year = year
def __str__(self):
"""Print the date as string in this format: M/D/Y"""
return f"{self.month}/{self.day}/{self.year}"
def before(self, other):
"""Check if this date comes before the other date"""
# Compare as (year, month, day) tuples; lexicographic order matches calendar order
# and returns an explicit False (rather than an implicit None) when self is not earlier
return (self.year, self.month, self.day) < (other.year, other.month, other.day)
###Output
_____no_output_____
###Markdown
Unit Tests
###Code
def test_dates():
t1 = Date(1, 2, 3)
assert t1.__str__() == '2/1/3'
t2 = Date(4, 5, 2)
assert t2.__str__() == '5/4/2'
assert not t1.before(t1)
assert t2.before(t1)
assert not t1.before(t2)
t2 = Date(4, 1, 3)
assert t2.__str__() == '1/4/3'
assert not t1.before(t1)
assert t2.before(t1)
t1 = Date(2, 2, 3)
t2 = Date(1, 2, 3)
assert t2.__str__() == '2/1/3'
assert not t1.before(t1)
assert not t1.before(t2)
assert t2.before(t1)
print("Success!")
test_dates()
###Output
Success!
###Markdown
Problem 3: Intervals This problem takes one of the shared problems from Day 7 and solves it using objects. Finish the definition of the class Interval. The interval '[a..b]' includes the points a, b, and all points between them. Decide if two intervals are equal. Print an interval as [a..b]. Decide if two intervals overlap, and merge two intervals.
###Code
class Interval(object):
def __init__(self, a, b):
"""Initialize an inteval"""
self.a = a
self.b = b
def __eq__(self, other):
"""Check if the two intervals are equal"""
## Make sure the type of self and other are same
if not isinstance(self, Interval):
self = Interval(self[0], self[1])
if not isinstance(other, Interval):
other = Interval(other[0], other[1])
p1 = normalize((self.a, self.b))
p2 = normalize((other.a, other.b))
return p1[0] == p2[0] and p1[1] == p2[1]
def __str__(self):
"""Return a string representation of the interval object"""
print(f"[{self.a}..{self.b}] contains numbers: {list(range(self.a, self.b+1))}")
return f"[{self.a}..{self.b}]"
def intersect(self, other):
"""This method returns True if the two intervals intersect."""
p1 = normalize((self.a, self.b))
p2 = normalize((other.a, other.b))
return contains(p1[0], p2) or contains(p1[1], p2) or contains(p2[0], p1) or contains(p2[1], p1)
def merge(self, other):
"""Merge two intersecting Intervals into a single Interval"""
p1 = normalize((self.a, self.b))
p2 = normalize((other.a, other.b))
# Return an Interval rather than a bare tuple, so the result supports the class API
return Interval(min(p1[0], p2[0]), max(p1[1], p2[1]))
## The following are functions, not methods:
def contains(x, interval):
"""This function returns True if the value x is within the closed interval."""
return (interval[0] <= x and x <= interval[1])
def normalize(interval):
"""This function takes an interval in the form of (a, b) and turns it into the standard form.
If a <= b, return (a, b). If a > b, return (b, a)."""
return (min(interval[0], interval[1]), max(interval[0], interval[1]))
###Output
_____no_output_____
###Markdown
Unit Tests
###Code
def test_interval():
assert Interval(1, 3) == Interval(1, 3)
assert Interval(1, 3) == Interval(3, 1)
assert Interval(1, 3).__str__() == '[1..3]'
assert Interval(1, 3).intersect(Interval(2, 4))
assert not Interval(1, 3).intersect(Interval(4, 7))
assert Interval(1, 3).merge(Interval(2, 4)) == Interval(1, 4)
print('Success!')
test_interval()
###Output
[1..3] contains numbers: [1, 2, 3]
Success!
|
examples/scripts/Update brondatabase json config files.ipynb | ###Markdown
Change the source database in a set of JSON files
###Code
import os
import json
###Output
_____no_output_____
###Markdown
With this script, the source database of multiple JSON files can be updated. The files are overwritten.
###Code
# path to the json files
path_json = r"c:\Users\905872\Documents\Gitlab\BG7587_HyDAMO_GML\examples\wvv\json"
# object that will be updated
json_objects = [
os.path.join(path_json, "hydroobject.json"),
os.path.join(path_json, "stuw.json"),
os.path.join(path_json, "duikersifonhevel.json"),
# os.path.join(path_json, "afsluitmiddel.json"),
# os.path.join(path_json, "brug.json"),
# os.path.join(path_json, "brug_dwp.json"),
# os.path.join(path_json, "gemaal.json"),
# os.path.join(path_json, "pomp.json"),
# # dwarsprofiel heeft andere brondata
# os.path.join(path_json, "dwarsprofiel.json"),
]
###Output
_____no_output_____
###Markdown
In the loop below, the source database is updated. Each JSON file is overwritten in place.
###Code
for obj in json_objects:
with open(obj, 'r+') as f:
print(obj)
data = json.load(f)
# values to update:
data['source']['path'] = r"c:\Users\905872\Documents\Gitlab\BG7587_HyDAMO_GML\examples\wvv\gdb\Breg_Hydamo.gdb" # <--- new source database path.
f.seek(0) # <--- should reset file position to the beginning.
json.dump(data, f, indent=4)
f.truncate() # remove remaining part
###Output
c:\Users\905872\Documents\Gitlab\BG7587_HyDAMO_GML\examples\wvv\json\gemaal.json
c:\Users\905872\Documents\Gitlab\BG7587_HyDAMO_GML\examples\wvv\json\pomp.json
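###Markdown
A small verification step (reusing the `json_objects` list above; purely a sanity check): re-read the first rewritten file and print the updated source path.
###Code
with open(json_objects[0]) as f:
    print(json.load(f)['source']['path'])
###Output
_____no_output_____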
|
Code/Linear Regression.ipynb | ###Markdown
Linear Regression Import some libraries: sklearn is Scikit-Learn, a library specialized for machine learning
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
Read the data from the table in the CSV file; data can also be read from Excel, txt, and other file formats.
###Code
df = pd.read_csv('../data/weight-height.csv')
df.head()
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height')
###Output
_____no_output_____
###Markdown
Take the data in the Height and Weight columns, convert it to an array, then reshape it from a row vector into a column vector.
###Code
X = np.asarray(df['Height']).reshape(-1, 1)
X
y = np.asarray(df['Weight']).reshape(-1, 1)
y
###Output
_____no_output_____
###Markdown
Train the model Run the fit() function to train
###Code
regression = LinearRegression()
regression.fit(X, y)
###Output
_____no_output_____
###Markdown
Results: The slope of the line is coef_, also called the weight. The intercept of the line is intercept_, also called the bias.
###Code
# Slope of the line (weight)
w = regression.coef_
# Intercept (bias)
b = regression.intercept_
###Output
_____no_output_____
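###Markdown
A quick cross-check (the sample height below is an assumed illustration value, not from the dataset): a prediction from `predict` should match the line equation $\hat{y} = wx + b$ computed by hand.
###Code
sample_height = 70.0 # assumed example input, same units as the Height column
print(regression.predict([[sample_height]]), w * sample_height + b)
###Output
_____no_output_____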
###Markdown
Create an array x to draw the line
###Code
x = np.linspace(55, 80, 100).reshape(-1,1)
###Output
_____no_output_____
###Markdown
Use the predict() function to compute the model's predicted values
###Code
yhat = regression.predict(x)
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults')
plt.plot(x, yhat, color='red', linewidth=3)
###Output
_____no_output_____
###Markdown
Loss Function Use the mean_squared_error function of the sklearn.metrics module to automatically compute the root mean squared error
###Code
y_pred = regression.predict(X)
from sklearn.metrics import mean_squared_error
mean_squared_error(y, y_pred, squared=False)
###Output
_____no_output_____
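###Markdown
The same value can be computed by hand from the RMSE formula $\sqrt{\tfrac{1}{n}\sum_i (y_i - \hat{y}_i)^2}$, a useful cross-check on the library call:
###Code
np.sqrt(np.mean((y - y_pred) ** 2))
###Output
_____no_output_____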
###Markdown
The R-squared score
###Code
from sklearn.metrics import r2_score
r2_score(y, y_pred)
###Output
_____no_output_____
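###Markdown
Equivalently, from the definition $R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}$:
###Code
1 - np.sum((y - y_pred) ** 2) / np.sum((y - np.mean(y)) ** 2)
###Output
_____no_output_____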
###Markdown
Train Test Split Import the train_test_split function; it automatically splits the data into a training set and a test set at the requested ratio
###Code
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Use 20% of the data for testing and 80% for training
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2)
len(X_train)
len(X_test)
len(y_train)
len(y_test)
###Output
_____no_output_____
###Markdown
Retrain the model
###Code
regression.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Compute the predicted values for the test set and the training set (usually only the test set is needed)
###Code
y_train_pred = regression.predict(X_train)
y_test_pred = regression.predict(X_test)
###Output
_____no_output_____
###Markdown
Compute the loss function on the training set
###Code
mean_squared_error(y_train, y_train_pred, squared=False)
###Output
_____no_output_____
###Markdown
Compute the cost function on the test set
###Code
mean_squared_error(y_test, y_test_pred, squared=False)
###Output
_____no_output_____
###Markdown
Observation: the loss on the training set differs from the loss on the test set Similarly for the R-squared score
###Code
r2_score(y_train, y_train_pred)
r2_score(y_test, y_test_pred)
###Output
_____no_output_____ |
posts/xarray-geoviews-a-new-perspective-on-oceanographic-data-part-ii.ipynb | ###Markdown
In a [previous post](https://madhatter106.github.io/DataScienceCorner/posts/xarray-geoviews-a-new-perspective-on-oceanographic-data-part-i/), I introduced xarray with some simple manipulation and data plotting. In this super-short post, I'm going to do some more manipulation, using multiple input files to create a new dimension, reorganize the data and store them in multiple output files. All with but a few lines of code. GOAL: The ultimate goal here is to create new datasets, one per band, that aggregate results across experiments so as to facilitate inter-experiment comparisons. HOW: I will load netCDF files from a number of Monte-Carlo uncertainty experiments, among which the source of the uncertainty differs: Lt (sensor noise), wind, pressure, relative humidity, and all of the above. At the end of this post, I will have 6 files, one per visible SeaWiFS band, each containing one 3D array whose dimensions are latitude, longitude, and experiment. WHY: I'm doing this to create an interactive visualization (*cf.* [next post]()) using GeoViews, where the goal is to compare, band-wise, cross-experiment results. As usual, start with some imports...
###Code
import xarray as xr
import os
import glob
###Output
_____no_output_____
###Markdown
Now I set up some file path logic to avoid rewriting full file paths. I then accrue the file paths into a list, fpaths. The new files I will create next will be stored in the 'Synthesis' directory for later retrieval.
###Code
dataDir = '/accounts/ekarakoy/disk02/UNCERTAINTIES/Monte-Carlo/DATA/AncillaryMC/'
expDirs = ['Lt', 'AllAnc_Lt', 'Pressure', 'RH', 'WindSpeed', 'O3']
outDir = 'Synthesis'
fpattern = 'S20031932003196.L3m_4D_SU*.nc'
fpaths = [glob.glob(os.path.join(dataDir, expDir, fpattern))[0] for expDir in expDirs]
###Output
_____no_output_____
###Markdown
I'm only interested in the visible bands because of the [black pixel assumption used in the atmospheric correction applied during the processing phase](http://www.ioccg.org/training/SLS-2012/Mobley_Lect3.pdf), which renders Rrs in the near-infrared bands useless.
###Code
bands = [412, 443, 490, 510, 555, 670]
###Output
_____no_output_____
###Markdown
***xarray*** has a nifty feature that allows opening multiple datasets and automatically concatenating matching (by name and dimension) arrays, with the option of naming the newly created dimension. In our case, this is *'experiment'*. The next line of code, below, opens what will end up being a temporary xarray Dataset - note that you will need [dask](http://dask.pydata.org/en/latest/) installed for this. I'll then label the *experiment* dimension with the appropriate experiment names. Importantly, the concatenation direction reflects the order in which the file paths are specified, and it's also the order in which the experiment names appear in the *'expDirs'* list defined above. I also make sure that the Rrs uncertainty data is labeled the same, 'rrs_unc'.
###Code
with xr.open_mfdataset(fpaths, concat_dim='experiment') as allData:
allData.coords['experiment'] = expDirs
for band in bands:
foutpath = os.path.join(dataDir, outDir, '%s%d%s' %(fpattern.split('SU')[0],
band, '.nc'))
if not os.path.exists(os.path.dirname(foutpath)):
os.makedirs(os.path.dirname(foutpath))
data = allData.data_vars['Rrs_unc_%d' % band]
data.name='rrs_unc'
dsData = data.to_dataset()
dsData.to_netcdf(path=foutpath, engine='netcdf4')
###Output
_____no_output_____
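###Markdown
Before listing the directory, a quick sanity check (reusing `foutpath` from the last loop iteration; purely illustrative): re-open one of the new per-band files and confirm the `experiment` dimension survived the round trip.
###Code
with xr.open_dataset(foutpath) as ds:
    print(ds['rrs_unc'].dims)
###Output
_____no_output_____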
###Markdown
Verify that all the files are where they should be - in the Synthesis directory
###Code
os.listdir(os.path.dirname(foutpath))
###Output
_____no_output_____ |
Engg_Comp/fixedbeam_sf_bm.ipynb | ###Markdown
Fixed Beam subjected to Point Loads Consider a fixed beam subjected to point loads applied perpendicular to the span. The structure is statically indeterminate to the second degree. However, our knowledge of fixed end moments, obtained from other methods such as strain energy or the conjugate beam method, gives us the expressions to compute the fixed end moments $M_a$ and $M_b$ at the left and right supports respectively in a fixed beam of span $L$ due to point loads $P_i$ applied at distances $a_i$ from the left support, as follows: $$\begin{align*}M_a &= \frac{1}{L^2} \sum P_i \, a_i \, (L - a_i)^2 \\M_b &= \frac{1}{L^2} \sum P_i \, (L - a_i) \, a_i^2\end{align*}$$ Knowing the fixed end moments at the two ends, we are left with only two unknown reactions, namely $R_a$ and $R_b$ at the left and right supports respectively. These can be determined by writing the two equations of static equilibrium (vertical force balance and moments about the left support), namely: $$\begin{align*}\sum F_y &= R_a + R_b - \sum_{i=0}^{n-1} P_i = 0 \\\sum M_a &= R_b \, L + M_a - M_b - \sum_{i=0}^{n-1} P_i \, a_i = 0\end{align*}$$ These equations can be rearranged to obtain the reactions $R_a$ and $R_b$ as follows: $$\begin{align*}R_b &= \frac{1}{L} \left( M_b - M_a + \sum_{i=0}^{n-1} P_i \, a_i \right) \\R_a &= \sum_{i=0}^{n-1} P_i - R_b \end{align*}$$ In the above expressions, $n$ is the number of applied point loads.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Input Data
L = 10.0
P = np.array([50.0, 75.0, 40.0])
a = np.array([2.0, 5.0, 6.0])
ndiv = 1000
nprint = 100
# Echo of input
nsec = ndiv + 1 # Number of sections
print 'Span =', L
print 'Loads =', P
print 'Distance of loads =', a
print 'Number of equal divisions =', ndiv
print 'Number of sections =', nsec
# Reactions
sumP = 0.0
sumM = 0.0
Ma = 0.0
Mb = 0.0
nloads = P.size
for i in range(nloads):
sumP += P[i]
sumM += P[i] * a[i]
Ma += P[i] * a[i] * (L - a[i])**2 / L**2
Mb += P[i] * (L - a[i]) * a[i]**2 / L**2
Rb = (Mb - Ma + sumM) / L
Ra = sumP - Rb
print '\nReactions'
print 'Ra =', Ra, 'Ma =', Ma, 'Rb =', Rb, 'Mb =', Mb
# SF and BM
V = np.zeros(nsec, dtype=float) # SF
M = np.zeros_like(V) # BM
x = np.linspace(0, L, nsec) # Section distances
print
print "%5s %10s %10s %10s" % ('', 'x (m)', 'SF (kN)', 'BM (kNm)')
for j in range(nsec): # One section at a time
V[j] = Ra
M[j] = Ra * x[j] - Ma
for i in range(nloads): # One load at a time
if a[i] < x[j]: # Load to left of section
V[j] -= P[i]
M[j] -= P[i] * (x[j] - a[i])
if j % nprint == 0:
print "%5d %10.2f %10.2f %10.2f" % (j, x[j], V[j], M[j])
# Plot of SF and BM
plt.subplot(211)
plt.plot(x, V)
plt.grid()
plt.axhline(0.0, color='k')
plt.xlabel('x (m)')
plt.ylabel('SF (kN)')
plt.title('SF Diagram')
plt.subplot(212)
plt.plot(x, M)
plt.axhline(0.0, color='k')
plt.grid()
plt.xlabel('x (m)')
plt.ylabel('BM (kNm)')
plt.title('BM Diagram')
plt.tight_layout()
plt.savefig('sf_bm.png')
plt.show()
###Output
Span = 10.0
Loads = [ 50. 75. 40.]
Distance of loads = [ 2. 5. 6.]
Number of equal divisions = 1000
Number of sections = 1001
Reactions
Ra = 96.38 Ma = 196.15 Rb = 68.62 Mb = 167.35
x (m) SF (kN) BM (kNm)
0 0.00 96.38 -196.15
100 1.00 96.38 -99.77
200 2.00 96.38 -3.39
300 3.00 46.38 42.99
400 4.00 46.38 89.37
500 5.00 46.38 135.75
600 6.00 -28.62 107.13
700 7.00 -68.62 38.51
800 8.00 -68.62 -30.11
900 9.00 -68.62 -98.73
1000 10.00 -68.62 -167.35
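###Markdown
A quick sanity check of the fixed-end-moment formulas (the load and span below are assumed illustration values, not part of the problem data): a single load $P$ at midspan should give $M_a = M_b = PL/8$ and $R_a = R_b = P/2$.
###Code
Pc, Lc = 100.0, 10.0 # assumed check values
ac = Lc / 2.0 # load at midspan
Mac = Pc * ac * (Lc - ac)**2 / Lc**2
Mbc = Pc * (Lc - ac) * ac**2 / Lc**2
print Mac, Mbc, Pc * Lc / 8.0 # all three should print 125.0
###Output
_____no_output_____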
###Markdown
To play around with parameters, it would be best to modularise the program by converting the above program into functions that would do the following: 1. Function **`read_input()`** where we can define the input data 2. Function **`reactions()`** to calculate support reactions 3. Function **`sf_bm()`** to calculate SF and BM at chosen sections 4. Function **`plot_sf_bm()`** to plot the SF and BM diagrams
###Code
def read_input(): # renamed so as not to shadow the built-in input()
L = 10.0
P = np.array([50.0, 75.0, 40.0])
a = np.array([2.0, 5.0, 6.0])
ndiv = 1000
nprint = 100
print 'Fixed Beam subjected to point Loads'
print 'Span = %.2fm' % L
print 'Magnitude of loads (kN):', P
print 'Distance of loads from left support (m):', a
print 'Number of equal divisions of span:', ndiv
return L, P, a, ndiv, nprint
def reactions(L, P, a):
sumP = 0.0
sumM = 0.0
Ma = 0.0
Mb = 0.0
nloads = P.size
for i in range(nloads):
sumP += P[i]
sumM += P[i] * a[i]
Ma += P[i] * a[i] * (L - a[i])**2 / L**2
Mb += P[i] * (L - a[i]) * a[i]**2 / L**2
Rb = (Mb - Ma + sumM) / L
Ra = sumP - Rb
return Ra, Ma, Rb, Mb
def sf_bm(L, P, a, ndiv, nprint, Ra, Ma):
nsec = ndiv + 1 # number of sections, computed locally instead of relying on a global
nloads = P.size
V = np.zeros(nsec, dtype=float) # SF
M = np.zeros_like(V) # BM
x = np.linspace(0, L, nsec) # Section distances
print
print "%5s %10s %10s %10s" % ('', 'x (m)', 'SF (kN)', 'BM (kNm)')
for j in range(nsec): # One section at a time
V[j] = Ra
M[j] = Ra * x[j] - Ma
for i in range(nloads): # One load at a time
if a[i] < x[j]: # Load to left of section
V[j] -= P[i]
M[j] -= P[i] * (x[j] - a[i])
if j % nprint == 0:
print "%5d %10.2f %10.2f %10.2f" % (j, x[j], V[j], M[j])
return x, V, M
def plot_sf_bm(x, V, M):
plt.subplot(211)
plt.plot(x, V)
plt.grid()
plt.axhline(0.0, color='k')
plt.xlabel('x (m)')
plt.ylabel('SF (kN)')
plt.title('SF Diagram')
plt.subplot(212)
plt.plot(x, M)
plt.axhline(0.0, color='k')
plt.grid()
plt.xlabel('x (m)')
plt.ylabel('BM (kNm)')
plt.title('BM Diagram')
plt.tight_layout()
plt.savefig('sf_bm.png')
plt.show()
return
L, P, a, ndiv, nprint = read_input()
Ra, Ma, Rb, Mb = reactions(L, P, a)
print
print 'Reactions'
print 'Ra = %.2fkN, Ma = %.2fkNm, Rb = %.2fkN, Mb = %.2fkNm' % (Ra, Ma, Rb, Mb)
x, V, M = sf_bm(L, P, a, ndiv, nprint, Ra, Ma)
plot_sf_bm(x, V, M)
###Output
Fixed Beam subjected to point Loads
Span = 10.00m
Magnitude of loads (kN): [ 50. 75. 40.]
Distance of loads from left support (m): [ 2. 5. 6.]
Number of equal divisions of span: 1000
Reactions
Ra = 96.38kN, Ma = 196.15kNm, Rb = 68.62kN, Mb = 167.35kNm
x (m) SF (kN) BM (kNm)
0 0.00 96.38 -196.15
100 1.00 96.38 -99.77
200 2.00 96.38 -3.39
300 3.00 46.38 42.99
400 4.00 46.38 89.37
500 5.00 46.38 135.75
600 6.00 -28.62 107.13
700 7.00 -68.62 38.51
800 8.00 -68.62 -30.11
900 9.00 -68.62 -98.73
1000 10.00 -68.62 -167.35
|
knn_FaceAll_0.01band-for-nodup-normalize.ipynb | ###Markdown
KNN & DTW
###Code
# -*- coding: utf-8 -*-
class Dtw(object):
def __init__(self, seq1, seq2,
patterns = [(-1,-1), (-1,0), (0,-1)],
weights = [{(0,0):2}, {(0,0):1}, {(0,0):1}],
band_r=0.005): #EDIT HERE
self._seq1 = seq1
self._seq2 = seq2
self.len_seq1 = len(seq1)
self.len_seq2 = len(seq2)
self.len_pattern = len(patterns)
self.sum_w = [sum(ws.values()) for ws in weights]
self._r = int(len(seq1)*band_r)
assert len(patterns) == len(weights)
self._patterns = patterns
self._weights = weights
def get_distance(self, i1, i2):
return abs(self._seq1[i1] - self._seq2[i2])
def calculate(self):
g = list([float('inf')]*self.len_seq2 for i in range(self.len_seq1))
cost = list([0]*self.len_seq2 for i in range(self.len_seq1))
g[0][0] = 2*self.get_distance(0, 0)
for i in range(self.len_seq1):
for j in range(max(0,i-self._r), min(i+self._r+1, self.len_seq2)):
for pat_i in range(self.len_pattern):
coor = (i+self._patterns[pat_i][0], j+self._patterns[pat_i][1])
if coor[0]<0 or coor[1]<0:
continue
dist = 0
for w_coor_offset, d_w in self._weights[pat_i].items():
w_coor = (i+w_coor_offset[0], j+w_coor_offset[1])
dist += d_w*self.get_distance(w_coor[0], w_coor[1])
this_val = g[coor[0]][coor[1]] + dist
this_cost = cost[coor[0]][coor[1]] + self.sum_w[pat_i]
if this_val < g[i][j]:
g[i][j] = this_val
cost[i][j] = this_cost
return g[self.len_seq1-1][self.len_seq2-1]/cost[self.len_seq1-1][self.len_seq2-1], g, cost
def print_table(self, tb):
print(' '+' '.join(["{:^7d}".format(i) for i in range(self.len_seq2)]))
for i in range(self.len_seq1):
str = "{:^4d}: ".format(i)
for j in range(self.len_seq2):
str += "{:^7.3f} ".format(tb[i][j])
print (str)
def print_g_matrix(self):
_, tb, _ = self.calculate()
self.print_table(tb)
def print_cost_matrix(self):
_, _, tb = self.calculate()
self.print_table(tb)
def get_dtw(self):
ans, _, _ = self.calculate()
return ans
import csv
import random
import math
import operator
import numpy as np
def loadDataset(filename, data=[]):
with open(filename, 'rb') as csvfile:
lines = csv.reader(csvfile,delimiter=' ')
dataset = list(lines)
for x in range(len(dataset)):
dataset[x] = filter(None, dataset[x])
dataset[x] = list(map(float, dataset[x]))
data.append(dataset[x])
def euclideanDistance(instance1, instance2, length):
distance = 0
for x in range(length):
if x == 0:
continue
distance += pow((instance1[x] - instance2[x]), 2)
return math.sqrt(distance)
def getNeighbors(trainingSet, testInstance, k, pattern, weight):
distances = []
length = len(testInstance)
for x in range(len(trainingSet)):
# series are already z-normalized in prepareData(); index 0 holds the class label, so it is skipped
d = Dtw(testInstance[1:], trainingSet[x][1:], pattern, weight)
dist = d.get_dtw()
# dist = euclideanDistance(testInstance, trainingSet[x], length)
distances.append((trainingSet[x], dist))
distances.sort(key=operator.itemgetter(1))
# print "dist >>>> ",distances
neighbors = []
for x in range(k):
neighbors.append(distances[x][0])
return neighbors
def getResponse(neighbors):
classVotes = {}
for x in range(len(neighbors)):
response = neighbors[x][0]
if response in classVotes:
classVotes[response] += 1
else:
classVotes[response] = 1
sortedVotes = sorted(classVotes.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedVotes[0][0]
def getAccuracy(testSet, predictions):
correct = 0
for x in range(len(testSet)):
if testSet[x][0] == predictions[x]:
correct += 1
return (correct/float(len(testSet))) * 100.0
def knn(trainingSet, testSet, k, pattern, weight):
# generate predictions
predictions=[]
for x in range(len(testSet)):
# print ">>",testSet[x]
neighbors = getNeighbors(trainingSet, testSet[x], k, pattern, weight)
# print "neighbors >>", neighbors
result = getResponse(neighbors)
# print "result >>", result
predictions.append(result)
# print('> predicted=' + repr(result) + ', actual=' + repr(testSet[x][0]))
accuracy = getAccuracy(testSet, predictions)
return accuracy
def prepareData(train_data, test_data):
# prepare data
rawTrainingSet=[]
rawTestSet=[]
testSet=[]
trainingSet=[]
loadDataset(train_data, rawTrainingSet)
loadDataset(test_data, rawTestSet)
for x in rawTrainingSet:
newTS = np.append(x[0], ( np.array(x[1:])-np.mean(x[1:]) )/np.std(x[1:]) )
trainingSet.append(newTS)
for x in rawTestSet:
newTS = np.append(x[0], ( np.array(x[1:])-np.mean(x[1:]) )/np.std(x[1:]) )
testSet.append(newTS)
# print 'Train set: ' + repr(len(trainingSet))
# print trainingSet
# print 'Test set: ' + repr(len(testSet))
# print testSet
return trainingSet, testSet
###Output
_____no_output_____
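###Markdown
A minimal sanity check of the `Dtw` class before running the experiment (the short sequences and the wide `band_r` below are assumed illustration values, not part of the FaceAll setup):
###Code
seq_a = [0.0, 1.0, 2.0, 3.0, 2.0, 1.0]
seq_b = [0.0, 1.0, 1.0, 2.0, 3.0, 1.0]
toy = Dtw(seq_a, seq_b, band_r=1.0) # full-width band so every cell of the toy grid is reachable
print toy.get_dtw()
###Output
_____no_output_____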
###Markdown
Main
###Code
PATTERNS_1 = [(0,-1), (-1,-1), (-1,0)]
WEIGHTS_SYM_1 = [{(0,0):1}, {(0,0):2}, {(0,0):1}]
COUNT = 10
weights = []
for i in range(COUNT+1):
for j in range(COUNT-i+1):
k = COUNT - j - i
weights.append([{(0,0):i}, {(0,0):j}, {(0,0):k}])
# EDIT HERE
TRAIN_DATA = 'dataset/FaceAll_TRAIN'
TEST_DATA = 'dataset/FaceAll_TEST'
OUTPUT_FILE = 'acc_FaceAll_0.01band_no-dup-normalize.csv'
trainingSet, testSet = prepareData(TRAIN_DATA, TEST_DATA)
%timeit knn(trainingSet, testSet, 1, PATTERNS_1, WEIGHTS_SYM_1)
with open(OUTPUT_FILE, "w") as myfile:
myfile.write("i,j,k,accuracy\n")
for weight in weights:
i = weight[0][(0,0)]
j = weight[1][(0,0)]
k = weight[2][(0,0)]
print "i:", i, "j:", j,"k:", k
acc = knn(trainingSet, testSet, 1, PATTERNS_1, weight)
print acc
with open(OUTPUT_FILE, "a") as myfile:
myfile.write(str(i)+","+str(j)+","+str(k)+","+str(acc)+"\n")
###Output
i: 0 j: 0 k: 10
|
notebooks/gain-followers/04-modelling-evaluation-predictions.ipynb | ###Markdown
Data Use glob to get all the CSV files in the raw data folder.
###Code
profile_files = filenames.profile_folder_path.glob(os.path.join("*.csv"))
profile_appended_data = []
# loop over the list of csv files
for f in profile_files:
data = pd.read_csv(f)
profile_appended_data.append(data)
#profile_appended_data
df = pd.concat(profile_appended_data)
df.reset_index(drop=True, inplace=True)
###Output
_____no_output_____
###Markdown
Drop duplicate userid
###Code
df = df.drop_duplicates(subset=['userid'], keep='last').reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Create Label for Followers
###Code
fpath = filenames.followers_path
follower = []
with open(fpath, newline='') as f:
for i in csv.reader(f):
follower.append(i[0])
df['is_follower'] = df['username'].isin(follower).astype(int)
###Output
_____no_output_____
###Markdown
Drop Variables
###Code
df.drop([
'userid','followed_by_viewer', 'igtvcount', 'blocked_by_viewer',
'follows_viewer', 'has_blocked_viewer', 'has_requested_viewer', 'external_url',
'is_verified', 'requested_by_viewer', 'profile_pic_url', 'similar_accounts', 'business_category_name', 'biography', 'full_name'
],
axis=1,
inplace=True)
###Output
_____no_output_____
###Markdown
To share with others to try the code in this notebook
###Code
#df.to_csv("../../data/deidentified_profile_data.csv", index = False)
df1 = df.copy()
df1.head()
df1.shape
###Output
_____no_output_____
###Markdown
Boxplot using plotly * To do: add to the dash application
###Code
fig = go.Figure()
fig.add_trace(go.Box(
y=df1['mediacount'],
name="Media Count",
jitter=0.3,
pointpos=-1.8,
boxpoints='all', # represent all points
marker_color='rgb(7,40,89)',
line_color='rgb(7,40,89)'
))
fig.add_trace(go.Box(
y=df1['followees'],
name="Followees",
boxpoints='suspectedoutliers', # only suspected outliers
marker=dict(
color='rgb(8,81,156)',
outliercolor='rgba(219, 64, 82, 0.6)',
line=dict(
outliercolor='rgba(219, 64, 82, 0.6)',
outlierwidth=2)),
line_color='rgb(8,81,156)'
))
fig.update_layout(title_text="Box Plot Media Count & Followees")
fig.show()
fig = go.Figure()
fig.add_trace(go.Box(
y=df1['followers'],
name="Followers",
boxpoints=False, # no data points
marker_color='rgb(9,56,125)',
line_color='rgb(9,56,125)'
))
fig.update_layout(title_text="Box Plot Followers")
fig.show()
fig = go.Figure()
fig.add_trace(go.Box(
y=df1['mediacount'],
x=df1['is_follower'],
name='Media Count',
marker_color='#3D9970'
))
fig.add_trace(go.Box(
y=df1['followees'],
x=df1['is_follower'],
name='Followees',
marker_color='#FF4136'
))
fig.add_trace(go.Box(
y=df1['followers'],
x=df1['is_follower'],
name='Followers',
marker_color='#FF851B'
))
fig.update_layout(
yaxis_title='Count',
boxmode='group' # group together boxes of the different traces for each value of x
)
fig.show()
df1['is_follower'].value_counts()
###Output
_____no_output_____
###Markdown
Skewed and Imbalanced Data Shuffle the dataframe
###Code
df1 = shuffle(df1)
df1.reset_index(inplace=True, drop=True)
df1.replace({False: 0, True: 1}, inplace=True)
###Output
_____no_output_____
###Markdown
Train Test Split
###Code
# Set Split Params
test_size = 0.33
random_state = 42
X = df1.drop(['is_follower'], axis = 1)
y = df1['is_follower'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
print(f'Train dataset size: {X_train.shape[0]} \n')
print(f'Test dataset size: {X_test.shape[0]}')
###Output
_____no_output_____
###Markdown
Drop username Username is joined to predictions for batch inference
###Code
X_train = X_train.drop(['username'], axis = 1)
X_test_new = X_test.drop(['username'], axis = 1)
X.columns
###Output
_____no_output_____
###Markdown
Preprocessing The best practice is to imagine you have deployed your model, and it is being used to predict things. Imagine a single test case is provided to your model for testing, or your model tends to predict an input after deployment. In this scenario, you only have a single input, and therefore it doesn't make sense to use it as fitting data for a Standard Scaler which gets this single instance as its training (fitting) data, because in this case, the output of the scaler would be different in terms of scale with every other input. Thus, you'd better train (fit) the Standard Scaler using the training set (i.e. after splitting) and transform the rest of the data (including the validation set, your test set, and whatever data comes into your model after deployment) using the fitted Standard Scaler. Moreover, in every stage of a machine learning project, you'd better use only the training data for fitting and training whatever you need (e.g. scalers, predictors, regressors, etc.) and leave the validation and test data only for validation and testing. For the cross-validation case, you'd better fit a scaler and transform your data within cross-validation, but it generally doesn't make much difference. You can test it though. Why Pipelines? The machine learning workflow consists of many steps from data preparation (e.g., dealing with missing values, scaling/encoding, feature extraction). When first learning this workflow, we perform the data preparation one step at a time. This can become time consuming since we need to apply the preparation steps to both the training and testing data. Pipelines allow us to streamline this process by compiling the preparation steps while easing the task of model tuning and monitoring. Scikit-Learn's Pipeline class provides a structure for applying a series of data transformations followed by an estimator. [link](https://scikit-learn.org/stable/common_pitfalls.html) [link](https://towardsdatascience.com/machine-learning-pipelines-with-scikit-learn-d43c32a6aa52)
###Code
num_selector = ['mediacount','followers','followees']
ordinal_selector = ['is_private', 'is_business_account', 'has_public_story']
num_processor = StandardScaler()
ordinal_processor = OrdinalEncoder()
preprocess = make_column_transformer(
(ordinal_processor, ordinal_selector),
(num_processor, num_selector)
)
X_train = preprocess.fit_transform(X_train)
# transform test set
X_test_new = preprocess.transform(X_test_new)
len(X_test_new)
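# --- Added illustration: the markdown above describes chaining transformations with a
# --- final estimator via a Pipeline. A minimal, hedged sketch; this object is only
# --- constructed for illustration and is never fitted or used elsewhere, since
# --- X_train/X_test_new were already transformed in place above.
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
example_pipeline = make_pipeline(
    make_column_transformer(
        (OrdinalEncoder(), ordinal_selector),
        (StandardScaler(), num_selector)
    ),
    RandomForestClassifier()
)
example_pipeline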
###Output
_____no_output_____
###Markdown
Remove outliers
Not all data is normal, or normal enough to treat as being drawn from a Gaussian distribution. A good statistic for summarizing a sample of data with a non-Gaussian distribution is the interquartile range, or IQR for short.
Unsupervised outlier detection using the Local Outlier Factor (LOF): the anomaly score of each sample is called the Local Outlier Factor. It measures the local deviation of the density of a given sample with respect to its neighbors. It is local in that the anomaly score depends on how isolated the object is with respect to the surrounding neighborhood. More precisely, locality is given by the k-nearest neighbors, whose distance is used to estimate the local density. By comparing the local density of a sample to the local densities of its neighbors, one can identify samples that have a substantially lower density than their neighbors. These are considered outliers.
###Code
# identify outliers in the training dataset
lof = LocalOutlierFactor()
yhat = lof.fit_predict(X_train)
# select all rows that are not outliers
mask = yhat != -1
X_train, y_train = X_train[mask, :], y_train[mask]
# summarize the shape of the updated training dataset
print(X_train.shape, y_train.shape)
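# --- Added illustration: the markdown above also mentions the IQR as a robust summary
# --- for non-Gaussian data. A minimal, hedged sketch of an IQR-based filter, shown for
# --- comparison only; the LOF mask above is what this notebook actually applies.
import numpy as np
q1, q3 = np.percentile(X_train, [25, 75], axis=0)
iqr = q3 - q1
iqr_mask = ((X_train >= q1 - 1.5 * iqr) & (X_train <= q3 + 1.5 * iqr)).all(axis=1)
print('rows an IQR filter would keep:', iqr_mask.sum())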
###Output
_____no_output_____
###Markdown
Imbalanced Dataset
Some common over-sampling and under-sampling techniques in imbalanced-learn are imblearn.over_sampling.RandomOverSampler, imblearn.under_sampling.RandomUnderSampler, and imblearn.over_sampling.SMOTE. These classes expose a convenient parameter that allows the user to change the sampling ratio.
For example, in SMOTE, to change the ratio you would input a dictionary, and all values must be greater than or equal to the size of the largest class (since SMOTE is an over-sampling technique). The reason I have found SMOTE to be a better fit for model performance is probably that with RandomOverSampler you are duplicating rows, which means the model can start to memorize the data rather than generalize to new data. SMOTE uses the k-nearest-neighbors algorithm to make "similar" data points to the under-sampled ones.
It is not good practice to blindly use SMOTE with the ratio set to its default (even class balance), because the model may overfit one or more of the minority classes (even though SMOTE is using nearest neighbors to make "similar" observations). In the same way that you tune the hyperparameters of an ML model, you should tune the hyperparameters of the SMOTE algorithm, such as the ratio and/or k.
NOTE: It is vital that you do not use SMOTE on the full data set. You MUST use SMOTE on the training set only (after you split). Then validate on your val/test sets and see whether your SMOTE model outperformed your other model(s). If you do not do this, there will be data leakage and your model is essentially cheating.
[link](https://www.analyticsvidhya.com/blog/2020/10/overcoming-class-imbalance-using-smote-techniques/)
###Code
# Total of 1 before SMOTE
y_train.sum()
sm = SMOTE(random_state=0, n_jobs=8 , sampling_strategy='minority', k_neighbors=7)
X_train, y_train = sm.fit_resample(X_train, y_train)
# Total of 1 after SMOTE
y_train.sum()
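# --- Added illustration: the markdown above notes that SMOTE's ratio can be set with a
# --- dictionary. A hedged sketch (class labels 0/1 follow the earlier False/True mapping;
# --- the target counts are made up for illustration and this object is never fitted):
sm_custom = SMOTE(sampling_strategy={0: 3000, 1: 3000}, random_state=0)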
###Output
_____no_output_____
###Markdown
Modelling
Training a model simply means learning (determining) good values for all the weights and the bias from labeled examples. In supervised learning, a machine learning algorithm builds a model by examining many examples and attempting to find a model that minimizes loss; this process is called empirical risk minimization.
Loss is the penalty for a bad prediction. That is, loss is a number indicating how bad the model's prediction was on a single example. If the model's prediction is perfect, the loss is zero; otherwise, the loss is greater. The goal of training a model is to find a set of weights and biases that have low loss, on average, across all examples.
Gaussian NB
###Code
gnb = GaussianNB()
# Fit on the training data
gnb_model = gnb.fit(X_train, y_train)
# Predict on the testing data
predictions=gnb_model.predict(X_test_new)
probabilities = gnb_model.predict_proba(X_test_new)[:,1]
# Calculate the roc-auc score
auc_nb=metrics.roc_auc_score(y_test, predictions)
acc_nb = metrics.accuracy_score(y_test, predictions)
f1_nb = metrics.f1_score(y_test, predictions)
# Display
print('F1 Score', "%.4f" % round(f1_nb,4))
print('Accuracy', "%.4f" % round(acc_nb,4))
print('AUC Score', "%.4f" % round(auc_nb,4))
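# --- Added illustration: the 'loss' idea from the Modelling markdown above, made concrete.
# --- Log-loss penalizes confident wrong probability estimates (shown for illustration only;
# --- it is not used for model selection anywhere in this notebook):
print('Log loss', "%.4f" % metrics.log_loss(y_test, probabilities))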
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
logreg = LogisticRegression()
# Fit on the training data
log_model=logreg.fit(X_train, y_train)
# Predict on the testing data
predictions=log_model.predict(X_test_new)
probabilities = log_model.predict_proba(X_test_new)[:,1]
# Calculate the roc-auc score
auc_log=metrics.roc_auc_score(y_test, predictions)
acc_log = metrics.accuracy_score(y_test, predictions)
f1_log = metrics.f1_score(y_test, predictions)
# Display
print('F1 Score', "%.4f" % round(f1_log,4))
print('Accuracy', "%.4f" % round(acc_log,4))
print('AUC Score', "%.4f" % round(auc_log,4))
###Output
_____no_output_____
###Markdown
KNeighborsClassifier
###Code
knn = KNeighborsClassifier(n_neighbors=4)
# Fit on the training data
knn_model=knn.fit(X_train, y_train)
# Predict on the testing data
predictions=knn_model.predict(X_test_new)
probabilities = knn_model.predict_proba(X_test_new)[:,1]
# Calculate the roc-auc score
auc_knn=metrics.roc_auc_score(y_test, predictions)
acc_knn = metrics.accuracy_score(y_test, predictions)
f1_knn = metrics.f1_score(y_test, predictions)
# Display
print('F1 Score', "%.4f" % round(f1_knn,4))
print('Accuracy', "%.4f" % round(acc_knn,4))
print('AUC Score', "%.4f" % round(auc_knn,4))
###Output
_____no_output_____
###Markdown
RandomForestClassifier
###Code
rf = RandomForestClassifier()
# Fit on the training data
rf_model=rf.fit(X_train, y_train)
# Predict on the testing data
predictions=rf_model.predict(X_test_new)
probabilities = rf_model.predict_proba(X_test_new)[:,1]
# Calculate the roc-auc score
auc_rf=metrics.roc_auc_score(y_test, predictions)
acc_rf = metrics.accuracy_score(y_test, predictions)
f1_rf = metrics.f1_score(y_test, predictions)
# Display
print('F1 Score', "%.4f" % round(f1_rf,4))
print('Accuracy', "%.4f" % round(acc_rf,4))
print('AUC Score', "%.4f" % round(auc_rf,4))
###Output
_____no_output_____
###Markdown
HistGradientBoostingClassifier [link](https://machinelearningmastery.com/histogram-based-gradient-boosting-ensembles/)
###Code
hist = HistGradientBoostingClassifier()
# Fit on the training data
hist_model=hist.fit(X_train, y_train)
# Predict on the testing data
predictions=hist_model.predict(X_test_new)
probabilities = hist_model.predict_proba(X_test_new)[:,1]
# Calculate the roc-auc score
auc_hist=metrics.roc_auc_score(y_test, predictions)
acc_hist = metrics.accuracy_score(y_test, predictions)
f1_hist = metrics.f1_score(y_test, predictions)
# Display
print('F1 Score', "%.4f" % round(f1_hist,4))
print('Accuracy', "%.4f" % round(acc_hist,4))
print('AUC Score', "%.4f" % round(auc_hist,4))
###Output
_____no_output_____
###Markdown
Comparison of 5 different Models
###Code
# create lists from the metrics we produced.
f1=[f1_nb, f1_log, f1_knn, f1_rf, f1_hist]
acc=[acc_nb, acc_log, acc_knn, acc_rf, acc_hist]
auc=[auc_nb, auc_log, auc_knn, auc_rf, auc_hist]
# Define a function that will round our metrics.
def rounder(metric):
scores_list=[]
for score in metric:
scores_list.append(round(float(score*100),1))
return scores_list
# Apply it to each of the three lists.
f1_scores=rounder(f1)
acc_scores=rounder(acc)
auc_scores=rounder(auc)
score_types=['F1 score', 'Accuracy', 'AUC score']
# Comparison of model metrics
models=['naive bayes', 'logistic regression', 'k-nearest neighbors', 'random forest', 'hist gradient boosting']
index=['F1 score', 'Accuracy', 'AUC score']
compare_models=pd.DataFrame([f1_scores, acc_scores, auc_scores], index=index, columns=models)
compare_models
# save to csv, for later use by plotly dash app.
compare_models.to_csv('../../resources/compare_models.csv', index=True)
pd.read_csv('../../resources/compare_models.csv', index_col=0)
# Let's display that with plotly.
fig = go.Figure()
fig.add_trace(go.Bar(
x=compare_models.loc['F1 score'].index,
y=compare_models.loc['F1 score'],
name=compare_models.index[0],
marker_color='rgb(107,174,214)'
))
fig.add_trace(go.Bar(
x=compare_models.loc['Accuracy'].index,
y=compare_models.loc['Accuracy'],
name=compare_models.index[1],
marker_color='rgba(219, 64, 82, 0.6)'
))
fig.add_trace(go.Bar(
x=compare_models.loc['AUC score'].index,
y=compare_models.loc['AUC score'],
name=compare_models.index[2],
marker_color='rgb(7,40,89)'
))
fig.update_layout(
title='Comparison of Possible Models',
xaxis = dict(title = 'Predictive models'), # x-axis label
yaxis = dict(title = 'Score'), # y-axis label
)
fig
###Output
_____no_output_____
###Markdown
Random Forest has the best performance.
Tuning Random Forest Using Grid Search
When creating a machine learning model, you'll be presented with design choices as to how to define your model architecture. Oftentimes, we don't immediately know what the optimal model architecture should be for a given model, and thus we'd like to be able to explore a range of possibilities. In true machine learning fashion, we'll ideally ask the machine to perform this exploration and select the optimal model architecture automatically. Parameters which define the model architecture are referred to as hyperparameters, and the process of searching for the ideal model architecture is referred to as hyperparameter tuning.
These hyperparameters might address model design questions such as:
* What degree of polynomial features should I use for my linear model?
* What should be the maximum depth allowed for my decision tree?
* What should be the minimum number of samples required at a leaf node in my decision tree?
* How many trees should I include in my random forest?
* How many neurons should I have in my neural network layer?
* How many layers should I have in my neural network?
* What should I set my learning rate to for gradient descent?
Hyperparameters are not model parameters and they cannot be trained directly from the data. Model parameters are learned during training, when we optimize a loss function using something like gradient descent. Whereas the model parameters specify how to transform the input data into the desired output, the hyperparameters define how our model is actually structured.
In general, this process includes:
1. Define a model
2. Define the range of possible values for all hyperparameters
3. Define a method for sampling hyperparameter values (a random-search sketch is included at the end of the next code cell)
4. Define an evaluative criterion to judge the model
5. Define a cross-validation method
Grid search
Grid search is arguably the most basic hyperparameter tuning method. With this technique, we simply build a model for each possible combination of all of the hyperparameter values provided, evaluate each model, and select the architecture which produces the best results.
* https://www.jeremyjordan.me/hyperparameter-tuning/
###Code
rfc=RandomForestClassifier(random_state=42)
param_grid = {
'n_estimators': [10, 100],
'max_features': ['sqrt', 'log2'],  # 'auto' (an alias for 'sqrt' in classifiers) was removed in newer scikit-learn releases
'max_depth' : [4,5,6,7,8],
'criterion' :['gini', 'entropy']
}
# Create grid search using 5-fold cross validation
grid_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5, n_jobs = 1, verbose=0)
grid_rfc.fit(X_train, y_train)
print(grid_rfc.best_params_)
model = grid_rfc
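# --- Added illustration: the markdown above mentions choosing a method for sampling
# --- hyperparameter values. A hedged sketch of the main alternative, random search,
# --- which samples n_iter combinations instead of trying every one. This object is
# --- constructed for illustration only and intentionally left unfitted:
from sklearn.model_selection import RandomizedSearchCV
random_rfc = RandomizedSearchCV(estimator=rfc, param_distributions=param_grid,
                                n_iter=10, cv=5, random_state=42)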
###Output
_____no_output_____
###Markdown
Predict on the testing data
After the model is built, testing data once again validates that it can make accurate predictions. While training and validation data include labels to monitor the performance metrics of the model, the testing data should be unlabeled. Test data provides a final, real-world check on an unseen dataset to confirm that the ML algorithm was trained effectively.
* https://www.applause.com/blog/training-data-validation-data-vs-test-data
###Code
predictions=model.predict(X_test_new)
probabilities = model.predict_proba(X_test_new)[:,1]
# Pickle the final model for use in the plotly dash app.
file = open('../../resources/final_model.pkl', 'wb')
pickle.dump(model, file)
file.close()
###Output
_____no_output_____
###Markdown
Final Model Metrics
###Code
# Full list of metrics
def model_metrics(y_test, predictions):
'''
Calculate 5 standard model metrics
Return a dictionary with the metrics
'''
f1 = metrics.f1_score(y_test, predictions)
accuracy = metrics.accuracy_score(y_test, predictions)
error = 1 - accuracy
precision = metrics.precision_score(y_test, predictions)
recall = metrics.recall_score(y_test, predictions)
rocauc = metrics.roc_auc_score(y_test, predictions)
return {'precision': precision, 'recall': recall,'f1 score':f1, 'accuracy': accuracy, 'error rate': error, 'ROC-AUC': rocauc}
eval_scores=model_metrics(y_test, predictions)
eval_scores
# Round the y values.
y_vals=[]
for val in list(eval_scores.values()):
y_vals.append(round(float(val*100),1))
y_vals
# Write over the previous dictionary with the rounded values.
eval_scores=dict(zip(eval_scores.keys(), y_vals))
print(eval_scores)
# Now save that dictionary to a pickle file, for later use in plotly dash app
file = open('../../resources/eval_scores.pkl', 'wb')
pickle.dump(eval_scores, file)
file.close()
# And here's a reminder of how to read that back in again, just in case this is unfamiliar:
file = open('../../resources/eval_scores.pkl', 'rb')
evals=pickle.load(file)
file.close()
evals
X_test.shape
# Convert that into a visualization.
fig = go.Figure()
fig.add_trace(go.Bar(
x=list(evals.keys()),
y=list(evals.values())
))
fig.update_traces(marker_color='rgb(107,174,214)', marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6)
fig.update_layout(
title='Evaluation Metrics for Random Forest Model (Testing Dataset = 578 profiles)',
xaxis = {'title': 'Metrics'},
yaxis = {'title': 'Percent'},
)
fig
###Output
_____no_output_____
###Markdown
Precision and Recall
Precision is how many of the returned hits were true positives, i.e. how many of the found items were correct hits.
Recall, literally, is how many of the true positives were recalled (found), i.e. how many of the correct hits were also found.
###Code
FPR, TPR, _ = roc_curve(y_test, probabilities)
#FPR
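# --- Added illustration: making the precision/recall definitions above concrete from the
# --- raw confusion-matrix counts (sklearn's metric functions are what this notebook
# --- actually reports elsewhere):
tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
print('precision = TP/(TP+FP) =', tp / (tp + fp))
print('recall    = TP/(TP+FN) =', tp / (tp + fn))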
###Output
_____no_output_____
###Markdown
ROC curve
An ROC curve (receiver operating characteristic curve) is a graph showing the performance of a classification model at all classification thresholds. This curve plots two parameters:
* True Positive Rate
* False Positive Rate
True Positive Rate (TPR) is a synonym for recall. An ROC curve plots TPR vs. FPR at different classification thresholds. Lowering the classification threshold classifies more items as positive, thus increasing both False Positives and True Positives.
[link](https://developers.google.com/machine-learning/crash-course/classification/roc-and-auc)
###Code
roc_score=round(100*roc_auc_score(y_test, predictions),1)
roc_score
roc_dict={'FPR':list(FPR),
'TPR':list(TPR),
'y_test':[int(i) for i in y_test],
'predictions':[int(i) for i in predictions]
}
# Save everything we need to reproduce the ROC-AUC figure in plotly dash.
with open('../../resources/roc_dict.json', 'w') as f:
json.dump(roc_dict, f)
with open('../../resources/roc_dict.json') as json_file:
roc_dict = json.load(json_file)
FPR=roc_dict['FPR']
TPR=roc_dict['TPR']
y_test=pd.Series(roc_dict['y_test'])
predictions=roc_dict['predictions']
# ROC-AUC figure
roc_score=round(100*roc_auc_score(y_test, predictions),1)
fig = go.Figure()
fig.add_trace(go.Scatter(
x=FPR,
y=TPR,
mode='lines',
name=f'AUC: {roc_score}',
marker_color='rgb(150,150,150)'
))
fig.add_trace(go.Scatter(
x=[0,1],
y=[0,1],
mode='lines',
name='Baseline Area: 50.0',
marker_color='rgb(37,37,37)'
))
fig.update_layout(
title='Receiver Operating Characteristic (ROC): Area Under Curve',
xaxis={'title': 'False Positive Rate (100-Specificity)','scaleratio': 1,'scaleanchor': 'y'},
yaxis={'title': 'True Positive Rate (Sensitivity)'}
)
fig.show()
###Output
_____no_output_____
###Markdown
Confusion Matrix
A confusion matrix is a summary of prediction results on a classification problem. The number of correct and incorrect predictions are summarized with count values and broken down by each class.
###Code
# A confusion matrix tells us our false positives and false negatives:
matrix=confusion_matrix(y_test, predictions)
print(matrix)
# sklearn's confusion_matrix orders classes as [0, 1] = [non-follower, follower]
cm=pd.DataFrame(matrix, columns=['pred: non-follower', 'pred: follower'])
cm[f'n={len(y_test)}']=['actual: non-follower', 'actual: follower']
cm=cm[[f'n={len(y_test)}', 'pred: non-follower', 'pred: follower']]
cm
# Save cm dataframe to a pickle file, for later use in plotly dash app
cm.to_csv('../../resources/confusion_matrix.csv', index=False)
cm=pd.read_csv('../../resources/confusion_matrix.csv')
cm
# Display the confusion matrix as a formatted table with Plotly
fig = go.Figure()
fig.add_trace(go.Table(
header=dict(values=cm.columns,
line = dict(color='rgb(150,150,150)'),
fill = dict(color='rgb(150,150,150)'),
align = ['left'] * 5),
cells=dict(values=[cm[f'n={len(y_test)}'], cm['pred: follower'], cm['pred: non-follower']],
line = dict(color='#7D7F80'),
fill = dict(color='white'),
align = ['left'] * 5)))
fig.update_layout(
title = f'Confusion Matrix: Random Forest Model (Testing Dataset)'
)
fig
###Output
_____no_output_____
###Markdown
Random Forest Feature Importance
The feature importance (variable importance) describes which features are relevant. It can help with better understanding of the solved problem, and it can sometimes lead to model improvements by employing feature selection.
The Random Forest algorithm has built-in feature importance, which can be computed in two ways:
Gini importance (or mean decrease in impurity), which is computed from the Random Forest structure. Let's look at how the Random Forest is constructed. It is a set of Decision Trees. Each Decision Tree is a set of internal nodes and leaves. In each internal node, the selected feature is used to decide how to divide the data set into two separate sets with similar responses within. The features for internal nodes are selected with some criterion, which for classification tasks can be Gini impurity or information gain, and for regression is variance reduction. We can measure how much each feature decreases the impurity of the split (the feature with the highest decrease is selected for the internal node). For each feature we can collect how much, on average, it decreases the impurity. The average over all trees in the forest is the measure of the feature importance. The biggest advantage of this method is its speed of computation: all needed values are computed during the Random Forest training. Its drawback is a tendency to prefer (select as important) numerical features and categorical features with high cardinality. What is more, in the case of correlated features it can select one of the features and neglect the importance of the second one.
Mean Decrease Accuracy is a method of computing the feature importance on permuted out-of-bag (OOB) samples based on the mean decrease in accuracy. This method is not implemented in the scikit-learn package. Very similar to this method is [permutation based importance](https://scikit-learn.org/stable/modules/generated/sklearn.inspection.permutation_importance.html), which scikit-learn does provide (a short sketch is included at the end of the next code cell). The permutation based importance is computationally expensive, and it can have problems with highly correlated features: it can report them as unimportant.
Feature Importance Computed with SHAP Values
This approach is model-agnostic and works well with algorithms not from scikit-learn: XGBoost, neural networks (Keras + TensorFlow), LightGBM, CatBoost. It can provide more information, like decision plots or dependence plots.
[link](https://mljar.com/blog/feature-importance-in-random-forest/)
###Code
model.best_estimator_.feature_importances_
X_test.columns
# Feature importance (Random Forest)
coeffs1=pd.DataFrame(list(zip(list(X_test.columns), model.best_estimator_.feature_importances_)), columns=['feature', 'coefficient'])
coeffs=coeffs1.sort_values(by='coefficient', ascending=False)
# Format the coefficients.
y_vals=[]
for val in list(coeffs['coefficient']):
y_vals.append(round(float(val),2))
y_vals
coeffs['coefficient']=y_vals
coeffs
# save the results to a csv file, for later use by plotly dash app.
coeffs.to_csv('../../resources/coefficients.csv', index=False)
# Let's display that with Plotly.
fig = go.Figure()
fig.add_trace(go.Bar(
x=coeffs['feature'],
y=coeffs['coefficient']
))
fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',
marker_line_width=1.5, opacity=0.6)
fig.update_layout(
title='Number of followers is a better indicator of becoming a follower than having a business account',
xaxis = {'title': 'Instagram Features'},
yaxis = {'title': 'Feature Importance'},  # Random Forest importances, not odds
)
fig
print(len(probabilities))
print(len(predictions))
print(len(y_test))
print(len(X_test))
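# --- Added illustration: the markdown above references permutation-based importance.
# --- A hedged sketch with scikit-learn's implementation, reusing the transformed test
# --- set prepared earlier (illustrative only; the plot above uses the built-in
# --- impurity-based importances):
from sklearn.inspection import permutation_importance
perm = permutation_importance(model.best_estimator_, X_test_new, y_test,
                              n_repeats=5, random_state=42)
print(perm.importances_mean)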
###Output
_____no_output_____
###Markdown
Merge usernames to probabilities
One of the most important things you can do before deploying a model is to try to understand model drift in an offline environment. Data scientists should seek to answer the question: "If I train a model using this set of features on data from six months ago, and I apply it to data that I generated today, how much worse is the model than one trained on data from a month ago and applied to today?" Performing this analysis offline allows you to estimate the rate at which a model's performance falls off and how often you'll need to retrain. (A generic sketch of this comparison is included at the end of the next code cell.)
###Code
probs=pd.DataFrame(probabilities, columns=['follower_probability'])
probs.shape
actual_df = pd.DataFrame(y_test, columns=['actual'])
X_test = X_test.reset_index(drop = True)
X_test.head(1)
X_test['is_private'].dtype
final = pd.concat([X_test, probs, actual_df], axis = 1)
# save to display in plotly dash app
final.to_csv('../../resources/final_probs.csv', index=False)
mydata=final.drop(['follower_probability'], axis=1)
fig = go.Figure()
fig.add_trace(go.Table(
header=dict(values=list(mydata.columns)),
cells=dict(values=list(mydata.loc[5]))))
fig
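# --- Added illustration: a generic, hedged sketch of the offline drift check described
# --- in the markdown above. This dataset has no timestamp column, so the frame names
# --- below (X_old, y_old, X_new, y_new) are hypothetical and the lines are left
# --- commented out on purpose:
# model_old = RandomForestClassifier().fit(X_old, y_old)
# print('score on newer data:', model_old.score(X_new, y_new))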
###Output
_____no_output_____
###Markdown
Train the selected model on the whole dataset before putting it into production
Theoretically, the more data your deployed model has seen, the better it should generalise. So if you trained the model on the full set of data you have available, it should generalise better than a model which only saw, for example, the train/val sets (e.g. ~90%) of the full data set.
For a given model (a functional form), changing the sample size will only affect $\mathrm{Var}(\hat{f}(x_0))$; namely, increasing the sample will diminish it. Meanwhile, $\mathrm{Bias}^2(\hat{f}(x_0))$ will stay the same, as the functional form $\hat{f}(\cdot)$ is fixed. (Clearly, the irreducible error also stays the same.) You therefore reduce the expected squared error by re-estimating the chosen model on the full sample, as compared to having estimated it on just the training sample.
Unless you're limiting yourself to a simple class of convex models/loss functions, you're considerably better off keeping a final test split.
[link](https://stats.stackexchange.com/questions/225820/is-it-needed-to-train-the-selected-model-again-on-entire-data-before-putting-in)
[link](https://datascience.stackexchange.com/questions/33008/is-it-always-better-to-use-the-whole-dataset-to-train-the-final-model)
###Code
X_all = X.drop('username', axis = 1)
###Output
_____no_output_____
###Markdown
Preprocess on all the data
###Code
X_full = preprocess.fit_transform(X_all)
# save to use later for individual datapoint or for batch
joblib.dump(preprocess, "../../resources/preprocess.joblib")
###Output
_____no_output_____
###Markdown
Fit model on all the data
###Code
model.fit(X_full, y)
###Output
_____no_output_____
###Markdown
Predictions
Machine learning prediction and inference are two different aspects of machine learning. Prediction is the ability to accurately predict a response variable, while inference deals with understanding the relationship between predictor variables and response variables. The difference between prediction and inference models can be seen in examples such as predicting marketing campaign success, or understanding how media influences sales on promotions.
Inference: you want to find out what effect Age, Passenger Class, and Gender have on surviving the Titanic disaster. You can set up a logistic regression and infer the effect each passenger characteristic has on survival rates.
Prediction: given some information on a Titanic passenger, you want to choose from the set {lives, dies} and be correct as often as possible. You may use linear regression for an inference model, while non-linear methods work best when prediction is your objective.
[link](https://stats.stackexchange.com/questions/244017/what-is-the-difference-between-prediction-and-inference)
[link](https://cloud.google.com/ai-platform/prediction/docs/overview)
###Code
# use glob to get all the csv files in the raw data folder.
prediction_files = filenames.others_profile_folder_path.glob("*.csv")  # Path.glob takes the pattern directly; os.path.join was a no-op here
prediction_appended_data = []
# loop over the list of csv files
for f in prediction_files:
data = pd.read_csv(f)
prediction_appended_data.append(data)
df_prediction = pd.concat(prediction_appended_data)
df_prediction.reset_index(drop=True, inplace=True)
df_prediction.shape
#### Drop variables
dfi2 = df_prediction.drop([
'userid', 'username','followed_by_viewer', 'igtvcount', 'blocked_by_viewer',
'follows_viewer', 'has_blocked_viewer', 'has_requested_viewer', 'external_url',
'is_verified', 'requested_by_viewer', 'profile_pic_url', 'similar_accounts', 'business_category_name', 'biography', 'full_name'
],
axis=1)
dfi2.head()
###Output
_____no_output_____
###Markdown
Load preprocess
###Code
preprocess = joblib.load('../../resources/preprocess.joblib')
###Output
_____no_output_____
###Markdown
Online versus batch prediction
The needs of your application dictate the type of prediction you should use. You should generally use online prediction (sometimes called HTTP prediction) when you are making requests in response to application input, or in other situations where timely inference is needed. Batch prediction is ideal for processing accumulated data when you don't need immediate results, for example a periodic job that gets predictions for all data collected since the last job. In both cases, you pass input data to a cloud-hosted machine-learning model and get inferences for each data instance. The differences are as follows:
Online prediction
* Optimized to minimize the latency of serving predictions.
* Can process one or more instances per request.
* Predictions returned in the response message.
* Input data passed directly as a JSON string.
* Returns as soon as possible.
Batch prediction
* Optimized to handle a high volume of instances in a job and to run more complex models.
* Can process one or more instances per request.
* Predictions written to output files in a Cloud Storage location that you specify.
* Input data passed indirectly as one or more URIs of files in Cloud Storage locations.
* Asynchronous request.
Online Prediction
###Code
inputs=[300, 1200, 500, 0, 0, 1]
keys=['mediacount', 'followers', 'followees', 'is_private', 'is_business_account', 'has_public_story']
dict6=dict(zip(keys, inputs))
test=pd.DataFrame([dict6])
test
test=test[['is_private', 'mediacount', 'followers', 'followees', 'is_business_account', 'has_public_story']]
test_array = preprocess.transform(test)
# unpickle the final model
file = open('../../resources/final_model.pkl', 'rb')
model=pickle.load(file)
file.close()
model.predict_proba(test_array)
model.predict(test_array)
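# --- Added illustration: the markdown above notes that online prediction passes input
# --- as a JSON string. A hedged sketch of what such a round trip could look like for
# --- this model's single-profile input (illustrative only):
import json
payload = json.dumps(dict6)          # client side: serialize the request
pd.DataFrame([json.loads(payload)])  # server side: rebuild the frame to preprocess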
###Output
_____no_output_____
###Markdown
Prediction for 1 profile to become a follower
Batch Predictions
Batch prediction latency: if you use a simple model and a small set of input instances, you'll find a considerable difference between how long identical prediction requests take using online versus batch prediction. It might take a batch job several minutes to complete predictions that are returned almost instantly by an online request. This is a side-effect of the different infrastructure used by the two methods of prediction. SageMaker allocates and initializes resources for a batch prediction job when you send the request, whereas online prediction is typically ready to process at the time of the request.
###Code
Xi = preprocess.transform(dfi2)
#Predictions
predictions_batch=model.predict(Xi)
probabilities_batch = model.predict_proba(Xi)[:,1]
#probabilities_batch
###Output
_____no_output_____
###Markdown
List of profiles to follow
###Code
contact = pd.DataFrame(model.predict(Xi))
predicted_profiles = pd.concat([df_prediction['username'], contact], axis = 1)
follow_profiles = predicted_profiles[predicted_profiles[0] == 1]
follow_profiles
###Output
_____no_output_____
###Markdown
Data for Model Retraining
Model retraining should not result in a different model-generating process. Rather, retraining simply refers to re-running the process that generated the previously selected model on a new training set of data. The features, model algorithm, and hyperparameter search space should all remain the same. One way to think about this is that retraining doesn't involve any code changes; it only involves changing the training data set.
Rather than deploying a model once and moving on to another project, machine learning practitioners need to retrain their models if they find that the data distributions have deviated significantly from those of the original training set. While the frequency of retraining will vary from problem to problem, ML engineers can start with a simple strategy that retrains models on a periodic basis as new data arrives, and evolve to more complex processes that quantify and react to model drift. This concept, known as model drift, can be mitigated, but it involves additional overhead in the form of monitoring infrastructure, oversight, and process.
Monitor model: test your deployment to ensure that your model is still performing as expected on test data with respect to your evaluation metrics and things like inference speed.
Evaluate new data: using a model in production means you will frequently pass brand new data through the model that it has never been tested on. It's important to perform evaluation and dig into specific samples to see how your model performs on any new data it encounters.
Continue understanding the model: some errors and biases in your model can be deep-seated and take a long time to uncover. You need to continuously test and probe your model for various edge cases and trends that could cause problems if they were discovered by clients instead.
Expand capabilities: even if everything is working perfectly, it's possible that the model isn't increasing profits as much as you hoped. From adding new classes, to developing new data streams, to making the model more efficient, there are countless ways to expand the capabilities of your current model to make it even better. Any time you want to improve your system, you will need to restart the ML lifecycle to update your data and model, and evaluate it all to make sure your new features work as expected.
[link](https://mlinproduction.com/model-retraining/)
###Code
retrain_data = df_prediction[df_prediction['username'].isin(follow_profiles['username'])]
file_time = datetime.now().strftime("%Y-%m-%d_%I-%M-%S_%p")
# file name
ext =".csv"
profile_name_path = str(filenames.profile_path) + file_time + ext
retrain_data.to_csv(profile_name_path, index = False)
###Output
_____no_output_____ |
evaluations/simulation/5-query.simulation-alpha-2.0.ipynb | ###Markdown
1. Parameters
###Code
simulation_dir = 'simulations/unset'
metadata_file = 'input/metadata.tsv.gz'
# Parameters
read_coverage = 30
mincov = 10
simulation_dir = "simulations/alpha-2.0-cov-30"
iterations = 3
sub_alpha = 2.0
from pathlib import Path
import imp
fp, pathname, description = imp.find_module('gdi_benchmark', ['../../lib'])
gdi_benchmark = imp.load_module('gdi_benchmark', fp, pathname, description)
simulation_dir_path = Path(simulation_dir)
case_name = str(simulation_dir_path.name)
index_reads_path = simulation_dir_path / 'index-reads'
index_assemblies_path = simulation_dir_path / 'index-assemblies'
output_api_reads_path = simulation_dir_path / 'query-reads-api.tsv'
output_api_assemblies_path = simulation_dir_path / 'query-assemblies-api.tsv'
output_cli_reads_path = simulation_dir_path / 'query-reads-cli.tsv'
output_cli_assemblies_path = simulation_dir_path / 'query-assemblies-cli.tsv'
###Output
_____no_output_____
###Markdown
2. Benchmark command-line
###Code
import pandas as pd
import genomics_data_index.api as gdi
def benchmark_cli_index(name: str, index_path: Path) -> pd.DataFrame:
db = gdi.GenomicsDataIndex.connect(index_path)
mutations_df = db.mutations_summary(reference_name='reference').sort_values('Count', ascending=False)
top_mutation = mutations_df.iloc[0].name
if 'chrom' not in top_mutation:
raise Exception(f'Does not exist a single mutation for index {index_path}')
else:
print(f'top_mutation={top_mutation}')
benchmark_commands = {
'query hasa': f'gdi --project-dir {index_path} --ncores 1 query "hasa:{top_mutation}"',
'query isa': f'gdi --project-dir {index_path} --ncores 1 query "isa:SH13-007"',
'query --summary': f'gdi --project-dir {index_path} --ncores 1 query --summary',
'query --features-summary': f'gdi --project-dir {index_path} --ncores 1 query --features-summary mutations',
'query isin': f'gdi --project-dir {index_path} --ncores 1 query --reference-name reference "isin_100_substitutions:SH13-007"',
'list samples': f'gdi --project-dir {index_path} --ncores 1 list samples',
}
number_samples = db.count_samples()
number_features_no_unknown = db.count_mutations(reference_genome='reference', include_unknown=False)
number_features_all = db.count_mutations(reference_genome='reference', include_unknown=True)
iterations = 10  # NOTE: this fixed value shadows the 'iterations' notebook parameter set above
benchmarker = gdi_benchmark.QueryBenchmarkHandler()
return benchmarker.benchmark_cli(name=name, kind_commands=benchmark_commands,
number_samples=number_samples,
number_features_no_unknown=number_features_no_unknown,
number_features_all=number_features_all,
iterations=iterations)
###Output
_____no_output_____
###Markdown
2.1. Benchmark reads
###Code
reads_cli_df = benchmark_cli_index(name=f'{case_name} (reads)', index_path=index_reads_path)
reads_cli_df.head(3)
reads_cli_df.to_csv(output_cli_reads_path, sep='\t', index=False)
###Output
_____no_output_____
###Markdown
2.2. Benchmark assemblies
###Code
assemblies_cli_df = benchmark_cli_index(name=f'{case_name} (assemblies)', index_path=index_assemblies_path)
assemblies_cli_df.head(3)
assemblies_cli_df.to_csv(output_cli_assemblies_path, sep='\t', index=False)
###Output
_____no_output_____
###Markdown
3. Test query API
3.1. Load (example) metadata
The simulated data is based on real sample names and a real tree, so I can load up real metadata and attach it to a query (though the mutations and reference genome are all simulated).
###Code
import pandas as pd
metadata_df = pd.read_csv(metadata_file, sep='\t').rename({'Sample Name': 'Sample Name Orig'}, axis='columns')
metadata_df.head(2)
###Output
_____no_output_____
###Markdown
3.2. Define benchmark cases
###Code
from typing import List
import genomics_data_index.api as gdi
def benchmark_api_index(name: str, index_path: Path) -> pd.DataFrame:
db = gdi.GenomicsDataIndex.connect(index_path)
q_no_join = db.samples_query(reference_name='reference', universe='mutations')
q_join = db.samples_query(reference_name='reference', universe='mutations').join(metadata_df, sample_names_column='Sample Name Orig')
mutations_df = db.mutations_summary(reference_name='reference').sort_values('Count', ascending=False)
top_mutations = mutations_df.iloc[[0,1]].index.tolist()
if len(top_mutations) != 2:
raise Exception(f'Does not exist two mutations for index {index_path}')
else:
mutation1 = top_mutations[0]
mutation2 = top_mutations[1]
print(f'mutation1={mutation1}, mutation2={mutation2}')
q = q_join.hasa(mutation1)
r = q_join.hasa(mutation2)
number_samples = db.count_samples()
number_features_no_unknown = db.count_mutations(reference_genome='reference', include_unknown=False)
number_features_all = db.count_mutations(reference_genome='reference', include_unknown=True)
repeat = 10
benchmark_cases = {
'db.samples_query': lambda: db.samples_query(reference_name='reference', universe='mutations'),
'q.join': lambda: q_no_join.join(metadata_df, sample_names_column='Sample Name Orig'),
'q.features_summary': lambda: q_join.features_summary(),
'q.features_comparison': lambda: q_join.features_comparison(sample_categories='outbreak_number', categories_kind='dataframe', kind='mutations', unit='proportion'),
'q.hasa': lambda: q_join.hasa(mutation1),
'q.isa': lambda: q_join.isa("SH13-007"),
'q AND r': lambda: q & r,
'q.toframe': lambda: q_join.toframe(),
'q.summary': lambda: q_join.summary(),
'q.isin (distance)': lambda: q_join.isin("SH13-007", kind='distance', distance=100, units='substitutions'),
'q.isin (mrca)': lambda: q_join.isin(["SH13-007", "SH12-001"], kind='mrca'),
}
benchmarker = gdi_benchmark.QueryBenchmarkHandler()
return benchmarker.benchmark_api(name=name, kind_functions=benchmark_cases,
number_samples=number_samples,
number_features_no_unknown=number_features_no_unknown,
number_features_all=number_features_all,
repeat=repeat)
###Output
_____no_output_____
###Markdown
3.3. Benchmark reads index
###Code
reads_df = benchmark_api_index(name=f'{case_name} (reads)', index_path=index_reads_path)
reads_df.head(5)
reads_df.to_csv(output_api_reads_path, sep='\t', index=False)
###Output
_____no_output_____
###Markdown
3.4. Benchmark assemblies index
###Code
assemblies_df = benchmark_api_index(name=f'{case_name} (assemblies)', index_path=index_assemblies_path)
assemblies_df.head(5)
assemblies_df.to_csv(output_api_assemblies_path, sep='\t', index=False)
###Output
_____no_output_____ |
notebooks/Overfitting_1.0.ipynb | ###Markdown
 Data load
###Code
# try to parse the dates right at the beginning
# it works out of the box if the date was stored ISO YYYY-MM-DD format
df_analyse=pd.read_csv('../data/processed/COVID_small_sync_timelines.csv',sep=';')
# df_analyse.sort_values('date',ascending=True).head()
df_analyse.head()
df_analyse= df_analyse.rename(columns={"Unnamed: 0": "date"})
###Output
_____no_output_____
###Markdown
Helper functions
###Code
def quick_plot(x_in, df_input,y_scale='log',slider=False):
""" Quick basic plot for quick static evaluation of a time series
you can push selective columns of your data frame by .iloc[:,[0,6,7,8]]
Parameters:
----------
x_in : array
array of date time object, or array of numbers
df_input : pandas dataframe
the plotting matrix where each column is plotted
the name of the column will be used for the legend
scale: str
y-axis scale as 'log' or 'linear'
slider: bool
True or False for x-axis slider
Returns:
----------
"""
fig = go.Figure()
for each in df_input.columns:
fig.add_trace(go.Scatter(
x=x_in,
y=df_input[each],
name=each,
opacity=0.8))
fig.update_layout(autosize=True,
width=1024,
height=768,
font=dict(
family="PT Sans, monospace",
size=18,
color="#7f7f7f"
)
)
fig.update_yaxes(type=y_scale),
fig.update_xaxes(tickangle=-45,
nticks=20,
tickfont=dict(size=14,color="#7f7f7f")
)
if slider==True:
fig.update_layout(xaxis_rangeslider_visible=True)
fig.show()
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
quick_plot(df_analyse.date,
df_analyse.iloc[:,3:-1],
y_scale='log',
slider=True)
###Output
_____no_output_____
###Markdown
Fitting a polynomial curve I higly recommend section: Hyperparameters and Model Validation from Jake VanderPlas *This function is from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* https://scikit-learn.org/stable/auto_examples/linear_model/plot_polynomial_interpolation.htmlsphx-glr-auto-examples-linear-model-plot-polynomial-interpolation-py
###Code
# check that all data are there
df_poly_check=df_analyse.iloc[0:27,3:-1].reset_index()
df_poly_check.head()
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
# NOTE: the stacked multi-country series below is immediately overwritten;
# only the Spain series is used in the rest of this section.
y=df_poly_check[['Germany','Afghanistan','Nigeria','Spain']].unstack().sort_index(axis=0,level=1)
y= df_analyse.loc[:150, "Spain"]
y.head()
y_len= len(y)
y_train=y[0:-(y_len//4)]
y_test=y[-(y_len//4):]
X_train=np.arange(len(y_train)).reshape(-1, 1)
X_test=np.arange(len(y_train),len(y)).reshape(-1, 1)
y_train[-5:]
%matplotlib inline
import matplotlib as mlp
import matplotlib.pyplot as plt
import seaborn;
seaborn.set(rc={'figure.figsize':(16,8)}) # plot formatting, prepare for subplot
fig, ax1 = plt.subplots(1, 1)
ax1.scatter(np.arange(len(y)),y, color='blue')
ax1.axvspan(len(y_train), len(y), facecolor='b', alpha=0.5)
for degree in [1]:
    y_hat_insample=PolynomialRegression(degree).fit(X_train, y_train).predict(X_train)
    y_hat_test = PolynomialRegression(degree).fit(X_train, y_train).predict(X_test)
    X_plot=np.concatenate((X_train, X_test), axis=None)
    y_plot=np.concatenate((y_hat_insample, y_hat_test), axis=None)
    # mean_absolute_percentage_error expects (y_true, y_pred) in that order
    ax1.plot(X_plot, y_plot, label='degree={0}'.format(degree)+
             ' MAPE train: ' + str(mean_absolute_percentage_error(y_train, y_hat_insample))[0:3]
             +' MAPE test ' +str(mean_absolute_percentage_error(y_test, y_hat_test))[0:3])
#ax1.set_ylim(100, 15000000)
#ax1.set_yscale('log')
ax1.legend(loc='best',
prop={'size': 16});
###Output
_____no_output_____
###Markdown
Regression Metrics (source: Wikipedia)
Mean Absolute Error
In statistics, the mean absolute error (MAE) is a measure of errors between paired observations expressing the same phenomenon. Comparing examples of $y$ (forecasts) versus $x$ (actual/observed) across time steps, MAE is calculated as: $\mathrm{MAE} = \frac{\sum_{i=1}^n\left| y_i-x_i\right|}{n} =\frac{\sum_{i=1}^n\left| e_i \right|}{n}.$
Mean Absolute Percentage Error (MAPE)
The mean absolute percentage error (MAPE), also known as mean absolute percentage deviation (MAPD), is a measure of prediction accuracy of a forecasting method in statistics, for example in trend estimation; it is also used as a loss function for regression problems in machine learning. It usually expresses the accuracy as a ratio defined by the formula: $\mbox{MAPE} = \frac{1}{n}\sum_{i=1}^n \left|\frac{x_i-y_i}{x_i}\right|$
If the MAPE is 80, on average, the forecast is off by 80%. (An MAE implementation sketch follows in the code cell below.)
###Code
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
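# --- Added illustration: the MAE formula from the markdown above, implemented alongside
# --- MAPE for completeness (a minimal sketch; not used elsewhere in this notebook):
def mean_absolute_error_manual(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs(y_true - y_pred))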
###Output
_____no_output_____
###Code
df_analyse=pd.read_csv("../data/processed/COVID_small_sync_timeline_table.csv",sep=';',)
df_analyse.sort_values('date',ascending=True).head()
###Output
_____no_output_____
###Markdown
Helper Function
###Code
def quick_plot(x_in, df_input, y_scale='log',slider=False,mode_plot = 'markers+lines'):
""" Quick basic plot for quick static evaluation of a time series
you can push selective columns of your data frame by .iloc[:,[0,6,7,8]]
Parameters:
----------
x_in : array
array of date time object, or array of numbers
df_input : pandas dataframe
the plotting matrix where each column is plotted
the name of the column will be used for the legend
scale: str
y-axis scale as 'log' or 'linear'
slider: bool
True or False for x-axis slider
Returns:
----------
"""
fig = go.Figure()
for each in df_input.columns:
if 'doubling' in each:
fig.add_trace(go.Scatter(
x=x_in,
y=df_input[each],
mode = 'markers',
line=dict(color='Grey', width=4, dash='dot'),
name=each,
opacity=0.10))
else:
fig.add_trace(go.Scatter(
x=x_in,
y=df_input[each],
mode = mode_plot,
name=each,
opacity=0.8))
fig.update_layout(autosize=True,
width=1024,
height=768,
font=dict(
family="PT Sans, monospace",
size=18,
color="#7f7f7f"
)
)
fig.update_yaxes(type=y_scale),
fig.update_xaxes(tickangle=-45,
nticks=20,
tickfont=dict(size=14,color="#7f7f7f")
)
if slider==True:
fig.update_layout(xaxis_rangeslider_visible=True)
fig.show()
quick_plot(df_analyse.date,
df_analyse.iloc[:,3:-1],
mode_plot='lines',slider=True)
###Output
_____no_output_____
###Markdown
Fit Polynomial Curve
###Code
df_poly_check=df_analyse.iloc[0:120,4:-1].reset_index()
df_poly_check.tail()
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2,**kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
# PolynomialFeatures generates the polynomial terms of a given degree (for example, if the degree
# is 2 and the input variables are a and b, the generated features are [1, a, b, a**2, a*b, b**2]).
# PolynomialFeatures is fitted to convert the independent variables of the training data into the
# equivalent polynomial features. These polynomials are then passed to LinearRegression, where the
# linear regression on the converted training set takes place. The fitted LinearRegression can then
# be used with predict() to predict the output.
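# --- Added illustration: a quick, hedged demonstration of the expansion described in the
# --- comment above, using made-up values a=2.0, b=3.0:
_demo = PolynomialFeatures(2).fit_transform(np.array([[2.0, 3.0]]))
print(_demo)  # -> [[1. 2. 3. 4. 6. 9.]] i.e. [1, a, b, a**2, a*b, b**2]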
y=df_poly_check[['Germany','US','Italy','India']].unstack().sort_index(axis=0,level=1)
y.head()
test_points=120
y_train=y[0:-test_points-1]
y_test=y[-test_points:]
X_train=np.arange(len(y_train)).reshape(-1,1)/4.0
X_test=np.arange(len(y_train),len(y_train)+test_points).reshape(-1,1)/4.0
import seaborn
seaborn.set(rc={'figure.figsize':(16,8)})
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
fig, ax1 = plt.subplots(1,1)
ax1.scatter(np.arange(len(y))/4,y, color='blue')
ax1.axvspan((len(y)-test_points-1)/4, len(y)/4, facecolor='b', alpha=0.5)
for degree in [1,3,7,12,50]:
    y_hat_insample=PolynomialRegression(degree).fit(X_train,y_train).predict(X_train)
    y_hat_test=PolynomialRegression(degree).fit(X_train,y_train).predict(X_test)
    X_plot=np.concatenate((X_train,X_test),axis=None)
    y_plot=np.concatenate((y_hat_insample,y_hat_test),axis=None)
    # mean_absolute_percentage_error expects (y_true, y_pred) in that order
    ax1.plot(X_plot, y_plot, label='degree={0}'.format(degree)+
             ' MAPE train: ' + str(mean_absolute_percentage_error(y_train, y_hat_insample))[0:3]
             +' MAPE test ' +str(mean_absolute_percentage_error(y_test, y_hat_test))[0:3])
ax1.set_ylim(100,3000000)
ax1.set_yscale('linear')
ax1.legend(loc='best',
prop={'size': 16});
###Output
_____no_output_____
###Markdown
 Data load
###Code
df_analyse=pd.read_csv('../data/processed/COVID_small_sync_timeline_table.csv',sep=';')
df_analyse.sort_values('date',ascending=True).head()
country_list = df_analyse.columns[1:]
###Output
_____no_output_____
###Markdown
Helper functions
###Code
def quick_plot(x_in, df_input,y_scale='log',slider=False):
""" Quick basic plot for quick static evaluation of a time series
you can push selective columns of your data frame by .iloc[:,[0,6,7,8]]
Parameters:
----------
x_in : array
array of date time object, or array of numbers
df_input : pandas dataframe
the plotting matrix where each column is plotted
the name of the column will be used for the legend
scale: str
y-axis scale as 'log' or 'linear'
slider: bool
True or False for x-axis slider
Returns:
----------
"""
fig = go.Figure()
for each in df_input.columns:
fig.add_trace(go.Scatter(
x=x_in,
y=df_input[each],
name=each,
opacity=0.8))
fig.update_layout(autosize=True,
width=1024,
height=768,
font=dict(
family="PT Sans, monospace",
size=18,
color="#7f7f7f"
)
)
fig.update_yaxes(type=y_scale),
fig.update_xaxes(tickangle=-45,
nticks=20,
tickfont=dict(size=14,color="#7f7f7f")
)
if slider==True:
fig.update_layout(xaxis_rangeslider_visible=True)
fig.show()
quick_plot(df_analyse.date,
df_analyse.iloc[:,3:-1],
y_scale='log',
slider=True)
###Output
_____no_output_____
###Markdown
Fitting a polynomial curve
###Code
# check that all data are there
df_poly_check=df_analyse.iloc[0:27,3:-1].reset_index()
df_poly_check.head()
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
y=df_poly_check[['Germany','Italy','US','Spain']].unstack().sort_index(axis=0,level=1)
y
test_points=28
y_train=y[0:-test_points-1]
y_test=y[-test_points:]
X_train=np.arange(len(y_train)).reshape(-1, 1)/4.0
X_test=np.arange(len(y_train),len(y_train)+test_points).reshape(-1, 1)/4.0
fig, ax1 = plt.subplots(1, 1)
ax1.scatter(np.
arange(len(y))/4,y, color='blue')
ax1.axvspan((len(y)-test_points-1)/4, len(y)/4, facecolor='b', alpha=0.5)
for degree in [1,3,7,15]:
y_hat_insaple=PolynomialRegression(degree).fit(X_train, y_train).predict(X_train)
y_hat_test = PolynomialRegression(degree).fit(X_train, y_train).predict(X_test)
X_plot=np.concatenate((X_train, X_test), axis=None)
y_plot=np.concatenate((y_hat_insaple, y_hat_test), axis=None)
ax1.plot(X_plot, y_plot, label='degree={0}'.format(degree)+
' MAPE train: ' + str(mean_absolute_percentage_error(y_hat_insaple, y_train))[0:3]
+' MAPE test ' +str(mean_absolute_percentage_error(y_hat_test, y_test))[0:3])
ax1.set_ylim(100, 15000000)
ax1.set_yscale('log')
ax1.legend(loc='best',
prop={'size': 16});
###Output
_____no_output_____
###Markdown

###Code
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
###Output
_____no_output_____
###Markdown
 Data Load
###Code
# try to parse the dates right at the beginning
# it works out of the box if the date was stored ISO YYYY-MM-DD format
df_analyse=pd.read_csv('../data/processed/COVID_small_sync_timeline_table.csv',sep=';')
df_analyse.sort_values('date',ascending=True).head()
country_list=df_analyse.columns[1:]
###Output
_____no_output_____
###Markdown
Helper function
###Code
def quick_plot(x_in, df_input,y_scale='log',slider=False):
""" Quick basic plot for quick static evaluation of a time series
you can push selective columns of your data frame by .iloc[:,[0,6,7,8]]
Parameters:
----------
x_in : array
array of date time object, or array of numbers
df_input : pandas dataframe
the plotting matrix where each column is plotted
the name of the column will be used for the legend
scale: str
y-axis scale as 'log' or 'linear'
slider: bool
True or False for x-axis slider
Returns:
----------
"""
fig = go.Figure()
for each in df_input.columns:
fig.add_trace(go.Scatter(
x=x_in,
y=df_input[each],
name=each,
opacity=0.8))
fig.update_layout(autosize=True,
width=1024,
height=768,
font=dict(
family="PT Sans, monospace",
size=18,
color="#7f7f7f"
)
)
fig.update_yaxes(type=y_scale),
fig.update_xaxes(tickangle=-45,
nticks=20,
tickfont=dict(size=14,color="#7f7f7f")
)
if slider==True:
fig.update_layout(xaxis_rangeslider_visible=True)
fig.show()
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
quick_plot(df_analyse.date,
df_analyse.iloc[:,3:-1],
y_scale='log',
slider=True)
###Output
_____no_output_____
###Markdown
Fitting a Polynomial curve
###Code
# check that all data are there
df_poly_check=df_analyse.iloc[0:27,3:-1].reset_index()
df_poly_check.head()
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
y=df_poly_check[['Germany','Italy','US','Spain']].unstack().sort_index(axis=0,level=1)
y.head()
test_points=28
y_train=y[0:-test_points-1]
y_test=y[-test_points:]
X_train=np.arange(len(y_train)).reshape(-1, 1)/4.0
X_test=np.arange(len(y_train),len(y_train)+test_points).reshape(-1, 1)/4.0
%matplotlib inline
import matplotlib as mlp
import matplotlib.pyplot as plt
import seaborn;
seaborn.set(rc={'figure.figsize':(16,8)}) # plot formatting, prepare for subplot
fig, ax1 = plt.subplots(1, 1)
ax1.scatter(np.arange(len(y))/4,y, color='blue')
ax1.axvspan((len(y)-test_points-1)/4, len(y)/4, facecolor='b', alpha=0.5)
for degree in [1,3,7,15]:
y_hat_insaple=PolynomialRegression(degree).fit(X_train, y_train).predict(X_train)
y_hat_test = PolynomialRegression(degree).fit(X_train, y_train).predict(X_test)
X_plot=np.concatenate((X_train, X_test), axis=None)
y_plot=np.concatenate((y_hat_insaple, y_hat_test), axis=None)
ax1.plot(X_plot, y_plot, label='degree={0}'.format(degree)+
' MAPE train: ' + str(mean_absolute_percentage_error(y_hat_insaple, y_train))[0:3]
+' MAPE test ' +str(mean_absolute_percentage_error(y_hat_test, y_test))[0:3])
ax1.set_ylim(100, 15000000)
ax1.set_yscale('log')
ax1.legend(loc='best',
prop={'size': 16});
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
###Output
_____no_output_____
###Markdown
 Data load
###Code
# try to parse the dates right at the beginning
# it works out of the box if the date was stored ISO YYYY-MM-DD format
df_analyse=pd.read_csv('../data/processed/COVID_small_sync_timeline_table.csv',sep=';')
df_analyse.sort_values('date',ascending=True).head()
country_list=df_analyse.columns[1:]
###Output
_____no_output_____
###Markdown
Helper functions
###Code
def quick_plot(x_in, df_input,y_scale='log',slider=False):
""" Quick basic plot for quick static evaluation of a time series
you can push selective columns of your data frame by .iloc[:,[0,6,7,8]]
Parameters:
----------
x_in : array
array of date time object, or array of numbers
df_input : pandas dataframe
the plotting matrix where each column is plotted
the name of the column will be used for the legend
scale: str
y-axis scale as 'log' or 'linear'
slider: bool
True or False for x-axis slider
Returns:
----------
"""
fig = go.Figure()
for each in df_input.columns:
fig.add_trace(go.Scatter(
x=x_in,
y=df_input[each],
name=each,
opacity=0.8))
fig.update_layout(autosize=True,
width=1024,
height=768,
font=dict(
family="PT Sans, monospace",
size=18,
color="#7f7f7f"
)
)
fig.update_yaxes(type=y_scale),
fig.update_xaxes(tickangle=-45,
nticks=20,
tickfont=dict(size=14,color="#7f7f7f")
)
if slider==True:
fig.update_layout(xaxis_rangeslider_visible=True)
fig.show()
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
quick_plot(df_analyse.date,
df_analyse.iloc[:,3:-1],
y_scale='log',
slider=True)
###Output
_____no_output_____
###Markdown
Fitting a polynomial curve
I highly recommend the section "Hyperparameters and Model Validation" from Jake VanderPlas. *This function is from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
https://scikit-learn.org/stable/auto_examples/linear_model/plot_polynomial_interpolation.html#sphx-glr-auto-examples-linear-model-plot-polynomial-interpolation-py
###Code
# check that all data are there
df_poly_check=df_analyse.iloc[0:27,3:-1].reset_index()
df_poly_check.head()
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
def PolynomialRegression(degree=2, **kwargs):
return make_pipeline(PolynomialFeatures(degree),
LinearRegression(**kwargs))
y=df_poly_check[['Germany','Italy','US','Spain']].unstack().sort_index(axis=0,level=1)
y.head()
test_points=28
y_train=y[0:-test_points-1]
y_test=y[-test_points:]
X_train=np.arange(len(y_train)).reshape(-1, 1)/4.0
X_test=np.arange(len(y_train),len(y_train)+test_points).reshape(-1, 1)/4.0
%matplotlib inline
import matplotlib as mlp
import matplotlib.pyplot as plt
import seaborn;
seaborn.set(rc={'figure.figsize':(16,8)}) # plot formatting, prepare for subplot
fig, ax1 = plt.subplots(1, 1)
ax1.scatter(np.arange(len(y))/4,y, color='blue')
ax1.axvspan((len(y)-test_points-1)/4, len(y)/4, facecolor='b', alpha=0.5)
for degree in [1,3,7,15]:
    y_hat_insample = PolynomialRegression(degree).fit(X_train, y_train).predict(X_train)
    y_hat_test = PolynomialRegression(degree).fit(X_train, y_train).predict(X_test)
    X_plot = np.concatenate((X_train, X_test), axis=None)
    y_plot = np.concatenate((y_hat_insample, y_hat_test), axis=None)
    ax1.plot(X_plot, y_plot, label='degree={0}'.format(degree)+
             ' MAPE train: ' + str(mean_absolute_percentage_error(y_train, y_hat_insample))[0:3]
             +' MAPE test ' + str(mean_absolute_percentage_error(y_test, y_hat_test))[0:3])
ax1.set_ylim(100, 15000000)
ax1.set_yscale('log')
ax1.legend(loc='best',
prop={'size': 16});
###Output
_____no_output_____
###Markdown
Regression Metrics (source: Wikipedia) Mean Absolute Error In statistics, the mean absolute error (MAE) is a measure of errors between paired observations expressing the same phenomenon. Comparing examples of Y (forecasts) versus X (actual/observed) across time steps, MAE is calculated as: $\mathrm{MAE} = \frac{\sum_{i=1}^n\left| y_i-x_i\right|}{n} =\frac{\sum_{i=1}^n\left| e_i \right|}{n}.$ Mean Absolute Percentage Error (MAPE) The mean absolute percentage error (MAPE), also known as mean absolute percentage deviation (MAPD), is a measure of the prediction accuracy of a forecasting method in statistics, for example in trend estimation; it is also used as a loss function for regression problems in machine learning. It usually expresses the accuracy as a ratio defined by the formula: $\mbox{MAPE} = \frac{1}{n}\sum_{i=1}^n \left|\frac{x_i-y_i}{x_i}\right|$ If the MAPE is 80, on average, the forecast is off by 80%.
###Code
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
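# Added illustrative check (not in the original notebook): for
# y_true = [100, 200] and y_pred = [110, 180] the per-point errors are
# 10% and 10%, so the MAPE should come out to exactly 10.0
print(mean_absolute_percentage_error([100, 200], [110, 180]))  # 10.0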
###Output
_____no_output_____ |
notebook/extras/aula-07-07-contadores.ipynb | ###Markdown
Counters
###Code
import pandas as pd # noqa E402
s = pd.Series(list('asdaesdaesdasesda'))
s
s.unique()
s.value_counts()
from collections import Counter # noqa E402
c = Counter(s)
c
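# Added illustrative example: `most_common(n)` returns the n most frequent
# items with their counts; here it should give [('a', 5), ('s', 5), ('d', 4)]
c.most_common(3)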
df = pd.DataFrame(c, index=['Frequência'])
df.columns.name = 'Letras'
df
df.T
dados = pd.read_csv('../../data/extras/aluguel.csv', sep=';')
dados.head(10)
dados.Tipo.unique()
dados.Tipo.value_counts()
resumo = pd.DataFrame(dados.Tipo.value_counts())
resumo
resumo[resumo.index == "Apartamento"]
###Output
_____no_output_____ |
files/bigdata_pythonbasics.ipynb | ###Markdown
Python Basics Alexander Weinberg, 2021 This Jupyter Notebook accompanies the Data Science lectures for the 2021 UChicago Pathways in Economics program for high school students. In this introductory notebook, students will cover basic topics in Python. This notebook is aimed at students with zero programming background. The topics include:- Variable assignment- Commenting- Importing packages- Calculator functions- Boolean variables- FunctionsThis lecture is designed to be worked through at your own pace. Your TA will be able to assist you with any difficulties you may encounter. **A quick note on the exercises:** The best way to learn Python is by doing. After each topic, I have included short exercises as a helpful tool for learning. They are not supposed to be difficult and usually are fairly similar to the sample code I've written in this notebook. When you get to each exercise click the `plus` sign in the tool bar to create a new cell. Try and solve the exercises right there. There are often many ways to solve a problem and I've included sample solutions at the end of the notebook to compare.These lectures are inspired by the Quantecon Datascience notebooks. https://datascience.quantecon.org/ 1. Variable AssignmentVariable assignment associates a value to a variable. We can associate many different kinds of of values. **Click run or type `shift` + `Enter` to execute the cell**
###Code
# assign the value `5` to the variable `x`
x = 5
# print out the value associated with `x`
print(x)
###Output
5
###Markdown
We can also assign words to variables.
###Code
myname = "Alex"
print(myname)
###Output
Alex
###Markdown
We can write over the variable to assign a new value.
###Code
print(myname)
myname = "Lebron James"
# Run again to see that value has changed
print(myname)
###Output
Lebron James
###Markdown
CommentsCode comments are short notes that you leave for future readers of your code (usually yourself). Comments explain what the code does.Be kind to your future self; leave lots of comments.
###Code
firstname = "Lebron" # Assign the variable firstname
lastname = "James" # Assign the variable lastname
# Combine the firstname, a space, and lastname
name = firstname + " " + lastname
# Print out the full name
print(name)
###Output
Lebron James
###Markdown
Exercise 1---Here is an exercise to practice the material we've just learned.I've included **example solutions** at the *end of the notebook*.1. Assign your first name to the variable `firstname`.2. Assign your last name to the variable `lastname`.3. Combine your first name, last name, and a space into one variable `name`.4. Use the function `len` to find out the number of letters in the variable `name`. - The way you use `len` is similar to the way we used `print`.5. Assign the result of part (4) to a variable named `num_letters`. 2. PackagesPackages are collections of tools bundled together. Some of these libraries are massive projects maintained by many developers online.- `numpy` is a package with many tools for math (esp. linear algebra). - `pandas` is a package for data manipulation and analysis.- `matplotlib` is a library for making plots and data visualization.---Load packages using the `import` statement.
###Code
# We are importing `numpy` with the nickname `np` for later convenience.
import numpy as np
###Output
_____no_output_____
###Markdown
Access functions and objects from the package using the following syntax. `package.function`
###Code
# We are going to access the function `mean` from the package `numpy`.
list_of_numbers = [1, 2, 3] # create a list of numbers
avg = np.mean(list_of_numbers) # compute the mean
print(avg) # print out the answer
###Output
2.0
###Markdown
Exercise 2--- Part A1. Load the package `time` with the nickname `tt`2. Use the function `localtime()` from the `time` package to see what the time is where you are right now. **Warning** Don't forget the parentheses after the function name.3. Output should look like below.>time.struct_time(tm_year=2021, tm_mon=2, tm_mday=5, tm_hour=19, tm_min=41, tm_sec=59, tm_wday=4, tm_yday=36, tm_isdst=0)--- Part B Earlier we used the function `np.mean()` to compute the average of a list of numbers. Now we will use the function `np.sum()` in order to compute the sum.1. Create a new list of numbers using the numbers 5, 10, and 15.2. Use the function `np.sum()` to compute the sum. 3. Python can function as a calculator
###Code
# Assign variables
a = 10
b = 2
print(a)
print(b)
# Lets do some addition
a + b
# Now some subtraction
a - b
# Python can do many kinds of arithmetic
print("a + b is", a + b) # addition
print("a - b is", a - b) # subtraction
print("a * b is", a * b) # multiplication
print("a / b is", a / b) # division
print("a ** b is", a**b) # exponent
print("\nPython follows PEMDAS.\n")
out1 = (a + b) * a
out2 = a + (b * a)
print("out1 = ", out1)
print("out2 = ", out2)
###Output
a + b is 12
a - b is 8
a * b is 20
a / b is 5.0
a ** b is 100
Python follows PEMDAS.
out1 = 120
out2 = 30
###Markdown
4. FunctionsFunctions take inputs and return outputs. We've already worked with a few functions so far. - `print()`- `np.mean()`- `+`- `-`- `len()`We can write our own functions. Every function starts with `def` and ends with `return`.
###Code
def add2(input_number):
'''This is a simple function that adds 2 to any input.'''
result = input_number + 2
return result
ans = add2(21)
print(ans)
###Output
23
###Markdown
Exercise 41. Write a new function called `minus6`. 2. Similar to the function we wrote above, it takes x as an input and returns x-6. 5. ListsLists are collections of items. Each item can be of any type. We have already worked with a list of numbers previously.
###Code
mylist = [5, "Alex", 6.3]
print(mylist)
###Output
[5, 'Alex', 6.3]
###Markdown
Access items in a list using brackets.This is called indexing.- Python starts counting at zero.
###Code
mylist[0] # first element
print(mylist[1]) # second element
print(mylist[2]) # third element
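# Added example: negative indices count from the end of the list
print(mylist[-1])  # last element -> 6.3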
###Output
6.3
###Markdown
Use the function `len()` to compute the number of elements there are in a list.
###Code
num_elements = len(mylist)
print(num_elements)
###Output
3
###Markdown
Exercise 5---We are going to double check that the `np.mean()` function is working correctly.1. Make a list, named `list1`, with the numbers 1, 4, 7, 10, and 200.2. Use `np.sum()` to compute the sum of `list1`, name that `listsum1`.3. Get the number of elements in `list1`, name that `numelements1`.4. Compute the average of `list1` as follows: (Recall: / means divide in Python.)$$mymean1 = \frac{listsum1}{numelements1}$$5. Use the `np.mean()` function to compute the mean of `list1`. Call that `numpy_mean`.6. Check the answers are identical. 6. For LoopsFor loops will loop over a list and do an operation for each element. Let's make a list named `basket`.
###Code
basket = ['apple', 'banana', 'grapes']
print(basket)
# For each element in our basket
# we're going to print out the phrase "Eat ____"
# We will remove the fruit from our basket
for item in basket:
print('Eat ', item)
###Output
Eat apple
Eat banana
Eat grapes
###Markdown
For loops are also helpful for working with a list of numbers.
###Code
for ii in [1,2,3,4,5]:
isquared = ii ** 2
print('The squared number = ', isquared)
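# Added tip: `range` produces the same numbers without typing out the list:
# for ii in range(1, 6):
#     print('The squared number = ', ii ** 2)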
###Output
The squared number = 1
The squared number = 4
The squared number = 9
The squared number = 16
The squared number = 25
###Markdown
Exercise 6---We are going to double check that the function `np.sum()` works correctly.1. Create a variable named tally = 0.2. Create `list2` $=[1000,2000,3000]$.3. Use a for loop to compute the sum of `list2`. - Hint: Use the for loop to add each element to the tally.4. Compare to `np.sum()`. 7. BooleansThere are different kinds of variables. So far we've worked with- **strings** (words)- **floats** (numbers with decimals)- **integers** (numbers without decimals)Now we're going to work with Boolean variables. A Boolean variable is either true or false.
###Code
x = True
y = False
print(x)
print(type(x))
a = 10
b = 16
print(a > b)
print(a < b)
print("a = ", a)
print("b = ", b)
print("a > b", "is", a > b) # greater than
print("a < b", "is", a < b) # less than
print("a == b", "is", a == b) # check for equality
print("a >= b", "is", a >= b) # greater than or equal to
print("a <= b", "is", a <= b) # less than or equal to
###Output
a = 10
b = 16
a > b is False
a < b is True
a == b is False
a >= b is False
a <= b is True
###Markdown
Boolean variables are the output of **variable comparison**. **Multiple comparisons** are handy.
###Code
a = 4
b = 5
(a < b ) and (a < 2*b) # true if both are true, and false if at least one is false
###Output
_____no_output_____
###Markdown
SolutionsCongrats! You've completed this intro notebook. Example Answers 1
###Code
num_letters = len(myname)
print(num_letters)
###Output
12
###Markdown
Example Answers 2
###Code
# Part A
import time as tt
time_right_now = tt.localtime()
print(time_right_now)
# Part B
new_list_of_numbers = [5,10,15]
print("Sum of these numbers is = ")
np.sum(new_list_of_numbers)
###Output
Sum of these numbers is =
###Markdown
Example Answers 4
###Code
def minus6(x):
'''This is a simple function subtracts 6 from x.'''
result = x - 6
return result
ans = minus6(21)
print(ans)
###Output
15
###Markdown
Example answers 5
###Code
# 1. Make a list
list1 = [1, 4, 7, 10, 200]
#2. Compute the sum of the list
listsum1 = np.sum(list1)
# 3. Get the number of elements in list1
numelements1 = len(list1)
# 4. Compute average manually
mymean1 = listsum1 / numelements1
# 5. Numpy average
numpy_mean = np.mean(list1)
# 6. Compare
print("My mean = ", mymean1)
print("Numpy mean = ", numpy_mean)
###Output
My mean = 44.4
Numpy mean = 44.4
###Markdown
Example answers 6
###Code
list2 = [1000, 2000, 3000]
numpy_sum = np.sum(list2)
print('Numpy sum = ', numpy_sum)
tally = 0
for num in list2:
# Add current num to the tally
tally = tally + num
print('My sum = ', tally)
###Output
My sum = 6000
|
notebooks/Sentinel_CNN.ipynb | ###Markdown
A CNN leveraging Sentinel Imagesauthor: Michael DarcyThis notebook contains a convolutional neural network implementation for the Lacuna Field Center prediction challenge. The network architecture is based on the pretrained ResNet152 network, available through PyTorch, trained with mean absolute error (L1) loss according to the competition rules. The ResNet152 output layer was modified to two floating-point outputs to predict the x and y displacement. Only the images were used from the dataset.
###Code
import pandas_path
from IPython.display import Image
from PIL import Image as pil_image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import warnings
import pytorch_lightning as pl
from torch.utils.data import DataLoader
import torchvision.models as models
from tqdm import tqdm
from torch import nn
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from itertools import product as iterp
%load_ext nb_black
# this path is for the images
datapath = "C:/Users/micha/OneDrive/Documents/GitHub/Lacuna/sentinel/"
# this path is for the training and test csv files
DATA_PATH = "C:/Users/micha/OneDrive/Documents/GitHub/Lacuna/"
###Output
_____no_output_____
###Markdown
We probably don't need some of these, but those are all the packages we'll use. Next we'll parse each individual Sentinel image into a 120x120x3 RGB image to pack it into the right shape for ResNet152, a pretrained image classification network.
###Code
def getSentinelTimeData(path2tif, filterCloudy=True):
"""
    Process an input Sentinel TIFF file into a dictionary keyed by filter
    (channels 1-3 here); each value is a 120x120 mosaic of the 40x40
    monthly tiles for that filter.
    Note: files that do not have 192 channels are assumed to cover only
    6 months, so the remaining tiles stay zero.
    The filterCloudy flag (cloud-mask exclusion via filter #16) is kept in
    the signature for compatibility but is not applied here.
"""
# filter and month IDs
filters = [1,2,3]
months = [1,2,3,4,5,6,7,8,9]
# read TIFF file into 3D array
img = rdtif(path2tif)
# if it's one of the images with too few months, just take the months we have and leave the rest blank
if img.shape[-1] != 192: months = [1,2,3,4,5,6]
# initialize the dict with empty list for each filter, including cropping all images to the minimum size across all data
d = np.array(np.zeros((3,3,3,40,40)))
# this will pack the different channels into different elements of d
for i, j in iterp(months, filters):
channel = ((i) * 16) + j
tile_x = (i%3)-1
tile_y = int(np.floor(i/3))%3
#print (j, tile_x, tile_y)
d[j-1][tile_y][tile_x] = img[:40, :40, channel]
# now we are going to tile all the months that we have into a single channel, and separate the different RGB sentinel channels
stacks = {}
for filt in filters:
stacks[filt] = np.hstack(np.hstack(d[filt-1]))
return stacks
###Output
_____no_output_____
###Markdown
OK let's try this out with a random fieldID. We'll check the shape to make sure things are working at this stage.
###Code
from skimage.io import imread as rdtif
fieldID = 'ff4174be'
fname = f'{fieldID}.tif'
img = rdtif(f'{datapath}{fname}')
img.shape
###Output
_____no_output_____
###Markdown
Next, let's plot the tiled images we have so far using the function we wrote above. Note that the images are not normalized in the image parsing function because we are going to have to normalize them later in the pipeline anyway.
###Code
filters = [1, 2, 3]
cmap = plt.cm.gray
d = getSentinelTimeData(f"{datapath}{fname}")
fig, ax = plt.subplots(nrows=3, ncols=1, figsize=(16, 16), sharex=True, sharey=True)
for i in filters:
if d[i] is not None:
ax[i - 1].imshow(d[i], cmap=cmap)
        ax[i - 1].set_title(f"filter = {i}")
plt.show()
###Output
_____no_output_____
###Markdown
Here we test out the planned RGB merge with pil_image.merge and check out our "color" image
###Code
# print(d[1] / np.amax(d[1]) * 256)
image_R = pil_image.fromarray(d[1] / np.amax(d[1]) * 256).convert("L")
image_G = pil_image.fromarray(d[2] / np.amax(d[2]) * 256).convert("L")
image_B = pil_image.fromarray(d[3] / np.amax(d[3]) * 256).convert("L")
# print(image_R.mode)
pil_image.merge("RGB", (image_R, image_G, image_B))
###Output
_____no_output_____
###Markdown
Now let's start importing our training data to take a look at what's inside. We'll only use the ID, x, and y columns.
###Code
train_metadata = pd.read_csv(DATA_PATH + "train-unique.csv")
train_metadata.head()
###Output
_____no_output_____
###Markdown
We then separate the training data into data and labels, adding a column with the full filename for easy retrieval during training.
###Code
train_labels = train_metadata[["ID", "x", "y"]].copy()
train_labels.head()
train_data = train_metadata.drop(columns=["x", "y"])
file_num = train_data["ID"]
train_data["file_name"] = [
DATA_PATH + "sentinel/" + x[3:] + ".tif" for x in train_data["ID"]
]
train_data.head()
###Output
_____no_output_____
###Markdown
Now we get into the meat of the code. This is a PyTorch Dataset with some specific modifications:1) During the transformation step, we will apply a random Gaussian blur to augment the dataset and reduce overfitting. The other steps in the transform are necessary for optimal ResNet152 pretrained models.2) __getitem__ contains the processing we need to load up the image like we tested out before.
###Code
class DatasetSentinel(Dataset):
"""Reads in an image, transforms pixel values, and serves
a dictionary containing the image id, image tensors, and label.
"""
def __init__(self, x_train, y_train=None):
self.data = x_train
self.label = y_train
self.transform = transforms.Compose(
[
transforms.Resize((128, 128)),
transforms.ToTensor(),
# All models expect the same normalization mean & std
# https://pytorch.org/docs/stable/torchvision/models.html
transforms.Normalize(
mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
),
transforms.GaussianBlur(5, sigma=(0.1, 2.0)),
]
)
self.Test_transform = transforms.Compose(
[
transforms.Resize((128, 128)),
transforms.ToTensor(),
# All models expect the same normalization mean & std
# https://pytorch.org/docs/stable/torchvision/models.html
transforms.Normalize(
mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
),
]
)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
d = getSentinelTimeData(self.data.iloc[index]["file_name"])
image_R = pil_image.fromarray(d[1] / np.amax(d[1]) * 256).convert("L")
image_G = pil_image.fromarray(d[2] / np.amax(d[2]) * 256).convert("L")
image_B = pil_image.fromarray(d[3] / np.amax(d[3]) * 256).convert("L")
image = pil_image.merge("RGB", (image_R, image_G, image_B))
image_id = self.data.iloc[index]["ID"]
if self.label is not None:
image = self.transform(image)
label_x = self.label.iloc[index]["x"]
label_y = self.label.iloc[index]["y"]
sample = {
"image_id": image_id,
"image": image,
"label": np.array([label_x, label_y]),
}
else:
image = self.Test_transform(image)
sample = {"image_id": image_id, "image": image}
return sample
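# Added illustrative usage (assumes the train_data/train_labels frames built above):
# ds = DatasetSentinel(train_data, train_labels)
# sample = ds[0]
# sample["image"].shape -> torch.Size([3, 128, 128]); sample["label"] -> array([x, y])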
###Output
_____no_output_____
###Markdown
The competition rules state we should use L1Loss so that's our loss module; boilerplate.
###Code
class L1Loss(nn.Module):
"""Measures absolute mean error."""
def __init__(self):
super(L1Loss, self).__init__()
self.L1 = nn.L1Loss()
def forward(self, pred, true):
return self.L1(pred, true)
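# Added illustrative check: the mean of |1-0| and |2-4| is 1.5
# criterion = L1Loss()
# criterion(torch.tensor([1.0, 2.0]), torch.tensor([0.0, 4.0]))  # tensor(1.5000)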
###Output
_____no_output_____
###Markdown
Here is the LightningModule for the model itself. We organize the hyperparameter inputs using the hparams object, set up logging, and define our datasets upon initialization.Then we load the pretrained model in prepare_model, changing the last layer (.fc) to remove softmax; this allows us to predict floating-point values. The rest of the functions are standard and documented in the official PyTorch Lightning documentation.The function make_submission_frame will generate the output file for our final prediction on the test set.
###Code
class PretrainedSentinelModel(pl.LightningModule):
def __init__(self, hparams):
super(PretrainedSentinelModel, self).__init__()
self.hparams = hparams
# hyperparameters for training the model with their default values as second argument
self.learning_rate = self.hparams.get("lr", 2e-4)
self.hidden_size = self.hparams.get("embedding_dim", 50)
self.dropout = self.hparams.get("dropout", 0.1)
self.max_epochs = self.hparams.get("max_epochs", 1)
self.num_workers = self.hparams.get("num_workers", 0)
self.batch_size = self.hparams.get("batch_size", 10)
# our datasets
self.x_train = self.hparams.get("x_train")
self.y_train = self.hparams.get("y_train")
self.x_val = self.hparams.get("x_val")
self.y_val = self.hparams.get("y_val")
self.num_outputs = 2 # two predictions for field displacement
# Where final model will be saved
self.output_path = Path.cwd() / self.hparams.get("output_path", "model-outputs")
self.output_path.mkdir(exist_ok=True)
# Where TensorBoard logs will be saved
self.log_path = Path.cwd() / self.hparams.get("log_path", "logs")
self.log_path.mkdir(exist_ok=True)
self.logger = pl.loggers.TensorBoardLogger(
self.log_path, name="benchmark_model"
)
# Instantiate training and validation datasets
self.train_dataset = DatasetSentinel(self.x_train, self.y_train)
self.val_dataset = DatasetSentinel(self.x_val, self.y_val)
self.model = self.prepare_model()
def prepare_model(self):
res_model = models.resnet152(pretrained=True)
# Input size of 2048 for resnet152
# https://pytorch.org/hub/pytorch_vision_resnet/
res_model.fc = nn.Sequential(
nn.Linear(2048, self.hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self.hidden_size, self.num_outputs),
)
return res_model
def forward(self, image):
return self.model(image)
def training_step(self, batch, batch_idx):
x = batch["image"]
y = batch["label"]
criterion = L1Loss()
# Switch to training mode
loss = criterion(
self.model.train().forward(x).squeeze(), y.type(torch.FloatTensor).cuda()
)
tensorboard_logs = {"train_loss": loss}
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
x = batch["image"]
y = batch["label"]
criterion = L1Loss()
# Switch to evaluation mode
loss = criterion(
self.model.eval().forward(x).squeeze(), y.type(torch.FloatTensor).cuda()
)
tensorboard_logs = {"val_loss": loss}
self.log("val_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
return {"batch_val_loss": loss}
def train_dataloader(self):
return DataLoader(
self.train_dataset, num_workers=self.num_workers, batch_size=self.batch_size
)
def val_dataloader(self):
return DataLoader(
self.val_dataset, num_workers=self.num_workers, batch_size=self.batch_size
)
def configure_optimizers(self):
return torch.optim.AdamW(self.model.parameters(), lr=self.learning_rate)
## Convenience Methods ##
def fit(self):
self.trainer = pl.Trainer(
max_epochs=self.max_epochs,
default_root_dir=self.output_path,
logger=self.logger,
checkpoint_callback=pl.callbacks.ModelCheckpoint(
filepath=self.output_path,
monitor="val_loss_epoch",
mode="min",
verbose=True,
),
gradient_clip_val=self.hparams.get("gradient_clip_val", 1),
num_sanity_val_steps=self.hparams.get("val_sanity_checks", 0),
gpus=1,
)
self.trainer.fit(self)
@torch.no_grad()
def make_submission_frame(self, x_test):
test_dataset = DatasetSentinel(x_test)
test_dataloader = DataLoader(
test_dataset, num_workers=self.num_workers, batch_size=self.batch_size
)
submission_frame = pd.DataFrame(index=x_test.ID, columns=["x", "y"])
for batch in tqdm(test_dataloader, total=len(test_dataloader)):
x = batch["image"]
preds = self.eval().forward(x)
# print(batch["image_id"], preds, submission_frame)
submission_frame.loc[batch["image_id"], ["x", "y"]] = (
preds.detach().numpy().squeeze()
)
submission_frame.x = submission_frame.x.astype(float)
submission_frame.y = submission_frame.y.astype(float)
return submission_frame
###Output
_____no_output_____
###Markdown
Now we'll split into training and validation sets. We don't need any particular shuffling since we assume the data rows are independent. I have k-fold cross-validation commented out in case we want to use it later.
###Code
## import the KFold object from sklearn
#from sklearn.model_selection import KFold
## We'll need this when we fit models
from sklearn.base import clone
# kfold = KFold(n_splits=2, shuffle=True, random_state=440)
split = int(len(train_data)*.8)
X_train = train_data.iloc[:split,:]
X_val = train_data.iloc[split:,:]  # use split: (not split+1:) so X_val rows stay aligned with y_val
y_train = train_labels.iloc[:split,:]
y_val = train_labels.iloc[split:,:]
X_train.head()
###Output
_____no_output_____
###Markdown
Now it's time to run the model. For hyperparameter explanation please see the documentation for PyTorch.
###Code
# You can loop through all the splits like so
# for i in range(2):
# result = next(kfold.split(X), None)
# X_train, X_val = X.iloc[result[0]], X.iloc[result[1]]
# y_train, y_val = y.iloc[result[0]], y.iloc[result[1]]
hparams = {
# Required hparams
"x_train": X_train,
"y_train": y_train,
"x_val": X_val,
"y_val": y_val,
# Optional hparams
"lr": 2e-4,
"embedding_dim": 100,
"dropout": 0.5,
"max_epochs": 5,
"batch_size": 10,
"num_workers": 0,
"gradient_clip_val": 1,
"val_sanity_checks": 0,
"output_path": "model-outputs",
"log_path": "logs",
}
# best_checkpoint = str(Path("model-outputs") / "epoch=8-step=40481.ckpt")
Sentinel_model = PretrainedSentinelModel(hparams=hparams)
# storm_model = PretrainedWindModel.load_from_checkpoint(best_checkpoint)
Sentinel_model.fit()
###Output
GPU available: True, used: True
TPU available: None, using: 0 TPU cores
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
| Name | Type | Params
---------------------------------
0 | model | ResNet | 58.3 M
---------------------------------
58.3 M Trainable params
0 Non-trainable params
58.3 M Total params
###Markdown
Now it's time to test our model. We'll load up the test CSV file and create the file names just like for the training data.
###Code
test_data = pd.read_csv(DATA_PATH + "test.csv")
file_num = test_data["ID"]
test_data["file_name"] = [
DATA_PATH + "sentinel/" + x[3:] + ".tif" for x in test_data["ID"]
]
test_data.head()
###Output
_____no_output_____
###Markdown
Now we load up the best performing checkpoint and run the model once on the test data using the make_submission_frame method.
###Code
best_checkpoint = str(Path("model-outputs") / "epoch=0-step=29.ckpt")
Sentinel_model = PretrainedSentinelModel.load_from_checkpoint(best_checkpoint)
submission = Sentinel_model.make_submission_frame(test_data)
###Output
100%|████████████████████████████████████████████████████████████████████████████████| 162/162 [03:58<00:00, 1.47s/it]
###Markdown
Let's compare some statistics on our predicted displacements versus the known training displacements.
###Code
print("Test Prediction Distribution")
submission.describe()
print("Training Truth Distribution")
train_labels.describe()
###Output
Training Truth Distribution
###Markdown
As you can see, the model is very conservative and often makes little to no correction in the position. This suggests that the performance could be improved significantly if we could add more information to the data, or build an ensemble with other models. Rather than MAE, let's look at the individual errors too.
###Code
best_checkpoint = str(Path("model-outputs") / "epoch=0-step=29.ckpt")
Sentinel_model = PretrainedSentinelModel.load_from_checkpoint(best_checkpoint)
submission = Sentinel_model.make_submission_frame(X_train)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 6), sharex=True, sharey=True)
std = np.arange(-1, 1, 0.01)
ax[0].scatter(submission["x"], y_train["x"])
ax[0].scatter(std, std)
ax[1].scatter(submission["y"], y_train["y"])
ax[1].scatter(std, std)
submission.to_csv((DATA_PATH + "Sentinelsubmission.csv"), index=True)
###Output
_____no_output_____ |
CNNTest_uuuuuhhmmm_80%done.ipynb | ###Markdown
###Code
import random
import numpy as np
import tensorflow as tf
from tarfile import open as taropen
from urllib import request
from struct import unpack
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
import matplotlib.pyplot as plt
import os
import datetime
os.environ["CUDA_VISIBLE_DEVICES"]="0" #for training on gpu
Classes = ["fm", "pager", "smartwares", "no_signal"]
def create_data():
DataBaseURL= "http://computer-in.love/data/{}.tar"
data = []
normalisationFactor = 100
test = 0
for modulation in Classes:
filename = "{}.tar".format(modulation)
request.urlretrieve(DataBaseURL.format(modulation), filename)
tar = taropen(filename)
class_label = [0]*len(Classes)
class_label[Classes.index(modulation)]=1
for member in tar.getmembers():
test = test + 1
with tar.extractfile(member) as f:
sample = []
buffer = f.read()
num_floats = len(buffer)//4
floats = unpack("f"*num_floats, buffer)
i = floats[0::2]
q = floats[1::2]
for j in range(min(len(i), len(q))):
#here happens the scaling or whatever you want
sample.append([i[j]/normalisationFactor, q[j]/normalisationFactor])
#here happens some cross_feature if you want. just append it to the sample
data.append([np.array(sample), np.array(class_label)])
random.shuffle(data)
return data
def get_data(data,trainP = 60, valP= 20, testP = 20):
    """pre: trainP + valP + testP <= 100"""
size =len(data)
trainSize = (size * trainP)//100
print(trainSize)
valSize = (size * valP)//100
print(valSize)
testSize = (size * testP)//100
print(testSize)
train = data[:trainSize]
val = data[trainSize: trainSize + valSize]
test = data[trainSize + valSize: trainSize + valSize + testSize]
x_train = []
y_train = []
x_val = []
y_val = []
x_test = []
y_test = []
for sample, target in train:
x_train.append(sample)
y_train.append(target)
for sample, target in val:
x_val.append(sample)
y_val.append(target)
for sample, target in test:
x_test.append(sample)
y_test.append(target)
return np.array(x_train), np.array(x_val), np.array(x_test), np.array(y_train), np.array(y_val), np.array(y_test)
data= create_data()
train_X, val_X, test_X, train_y, val_y, test_y = get_data(data)
print(np.max(train_X[0]))
print(val_X.shape)
train_X = train_X.reshape(-1, 128, 2, 1)
test_X = test_X.reshape(-1,128,2,1)
val_X = val_X.reshape(-1,128,2,1)
training_iters = 100
learning_rate = 0.001
batch_size = 50
#data input (img shape: 128*2)
n_input_1 = 128
n_input_2 = 2
# data total classes (0-9 digits)
n_classes = 4
tf.reset_default_graph()
#both placeholders are of type float
x = tf.placeholder("float", [None, n_input_1,n_input_2,1])
y = tf.placeholder("float", [None, n_classes])
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
return tf.nn.max_pool(x, ksize=[1, k, 1, 1], strides=[1, k, 1, 1],padding='SAME')
weights = {
'wc1': tf.get_variable('W0', shape=(11,2,1,8), initializer=tf.contrib.layers.xavier_initializer()),
'wc2': tf.get_variable('W1', shape=(11,2,8,8), initializer=tf.contrib.layers.xavier_initializer()),
#'wc3': tf.get_variable('W2', shape=(11,2,16,32), initializer=tf.contrib.layers.xavier_initializer()),
'wd1': tf.get_variable('W3', shape=(8*32*2,8), initializer=tf.contrib.layers.xavier_initializer()),
'out': tf.get_variable('W6', shape=(8,n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}
biases = {
'bc1': tf.get_variable('B0', shape=(8), initializer=tf.contrib.layers.xavier_initializer()),
'bc2': tf.get_variable('B1', shape=(8), initializer=tf.contrib.layers.xavier_initializer()),
#'bc3': tf.get_variable('B2', shape=(32), initializer=tf.contrib.layers.xavier_initializer()),
'bd1': tf.get_variable('B3', shape=(8), initializer=tf.contrib.layers.xavier_initializer()),
'out': tf.get_variable('B4', shape=(n_classes), initializer=tf.contrib.layers.xavier_initializer()),
}
def conv_net(x, weights, biases):
# here we call the conv2d function we had defined above and pass the input image x, weights wc1 and bias bc1.
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling), this chooses the max value from a 2*1 matrix window and outputs a 64*2 matrix.
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
# here we call the conv2d function we had defined above and pass the input image x, weights wc2 and bias bc2.
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling), this chooses the max value from a 2*1 matrix window and outputs a 32*2 matrix.
conv2 = maxpool2d(conv2, k=2)
#conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
# Max Pooling (down-sampling), this chooses the max value from a 2*1 matrix window and outputs a 16*2.
#conv3 = maxpool2d(conv3, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Output, class prediction
# finally we multiply the fully connected layer with the weights and add a bias term.
out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
return out
pred = conv_net(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
#Here you check whether the index of the maximum value of the predicted image is equal to the actual labelled image. and both will be a column vector.
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
#calculate accuracy across all the given images and average them out.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
train_loss_list = []
test_loss_list = []
validation_loss_list = []
train_accuracy_list = []
test_accuracy_list = []
validation_accuracy_list = []
summary_writer = tf.summary.FileWriter('./Output', sess.graph)
for i in range(training_iters):
batch_num = int(len(train_X)/training_iters/batch_size)
loss = 0.0
acc = 0.0
for batch in range(batch_num):
batch_x = train_X[(i*batch_num+batch)*batch_size:min(((i*batch_num+batch)*batch_size+1)*batch_size,len(train_X))]
batch_y = train_y[(i*batch_num+batch)*batch_size:min(((i*batch_num+batch)*batch_size+1)*batch_size,len(train_y))]
# Run optimization op (backprop).
# Calculate batch loss and accuracy
opt = sess.run(optimizer, feed_dict={x: batch_x,
y: batch_y})
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y})
train_loss_list.append(loss)
train_accuracy_list.append(acc)
val_acc,val_loss = sess.run([accuracy,cost], feed_dict={x: val_X,y : val_y})
validation_loss_list.append(val_loss)
validation_accuracy_list.append(val_acc)
print("Iter " + str(i) + ", Loss= " + \
"{:.6f}".format(train_loss_list[-1]) + ", Training Accuracy= " + \
"{:.5f}".format(train_accuracy_list[-1]))
print("Validation Accuracy:","{:.5f}".format(val_acc))
print("Validation Loss:","{:.5f}".format(val_loss))
test_acc,test_loss = sess.run([accuracy,cost], feed_dict={x: test_X,y : test_y})
test_loss_list.append(test_loss)
test_accuracy_list.append(test_acc)
print("Testing Accuracy:","{:.5f}".format(test_acc))
summary_writer.close()
plt.plot(range(len(train_loss_list)), train_loss_list, 'black', label='Training loss')
plt.plot(range(len(validation_loss_list)), validation_loss_list, 'red', label='Validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs ',fontsize=16)
plt.ylabel('Loss',fontsize=16)
plt.legend()
plt.figure()
plt.show()
plt.plot(range(len(train_loss_list)), train_accuracy_list, 'black', label='Training Accuracy')
plt.plot(range(len(validation_accuracy_list)), validation_accuracy_list, 'red', label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs ',fontsize=16)
plt.ylabel('Loss',fontsize=16)
plt.legend()
plt.figure()
plt.show()
modelName= "./models/model_{}.ckpt".format(datetime.datetime.now().strftime("%Y_%m_%d_%H:%M"))
save_path = saver.save(sess, modelName)
from google.colab import files
uploaded = files.upload()
saver = tf.train.Saver()
# Later, launch the model, use the saver to restore variables from disk, and
# do some work with the model.
counter = 0
with tf.Session() as sess:
saver = tf.train.import_meta_graph("models/model_2019_01_07_09:46.ckpt.meta")
foo = tf.train.latest_checkpoint('models/')
print(foo)
saver.restore(sess, foo)
print("Model restored.")
currentCounter = counter % len(test_X)
print("Prediciton: {}, Actual Label: {}".format(Classes[tf.argmax(sess.run([pred], feed_dict={x: [test_X[currentCounter]]}),1)]),(Classes[tf.argmax(test_y[currentCounter])]))
counter = counter + 1
###Output
models/model_2019_01_07_09:46.ckpt
INFO:tensorflow:Restoring parameters from models/model_2019_01_07_09:46.ckpt
Model restored.
|
DissertationmainNad_work.ipynb | ###Markdown
###Code
!pip show tensorflow
!pip show tensorflow_probability
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Dependencies
import os
import warnings
#from absl import flags
import matplotlib
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import math
import pandas as pd
tfd = tfp.distributions
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
#from hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL, Trials
#import python_utils
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_float("learning_rate", default = 0.0001, help = "Initial learning rate.")
flags.DEFINE_integer("epochs", default = 700, help = "Number of epochs to train for")
flags.DEFINE_integer("batch_size", default =128, help = "Batch size.")
flags.DEFINE_integer("eval_freq", default = 400, help =" Frequency at which to validate the model.")
flags.DEFINE_float("kernel_posterior_scale_mean", default = -0.9, help = "Initial kernel posterior mean of the scale (log var) for q(w)")
flags.DEFINE_float("kernel_posterior_scale_constraint", default = 0.2, help = "Posterior kernel constraint for the scale (log var) for q(w)")
flags.DEFINE_float("kl_annealing", default = 50, help = "Epochs to anneal the KL term (anneals from 0 to 1)")
flags.DEFINE_integer("num_hidden_layers", default = 4, help = "Number of hidden layers")
flags.DEFINE_integer("num_monte_carlo",
default=50,
help="Network draws to compute predictive probabilities.")
tf.app.flags.DEFINE_string('f', '', 'kernel')
#initialize flags
#FLAGS = flags.FLAGS
print(FLAGS.learning_rate)
print(FLAGS.epochs)
print(FLAGS.num_monte_carlo)
from google.colab import drive
drive.mount("/content/gdrive")
!ls "/content/gdrive/My Drive/"
# Read in the dataset
df = pd.read_csv('/content/gdrive/My Drive/work2.csv').astype(np.float32)
change = df.query('Speed>0').sample(frac = .1).index
df.loc[change, 'Speed'] = 0
df.loc[change, 'Class'] = 0
df.to_csv('work2.csv', header = True, index =False)
df.shape
X = df.iloc[:,:-1].values
y = df.iloc[:,-1].values
X
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=1)
#reshape y-data to become column vector
y_train = np.reshape(y_train,[-1,1])
y_val = np.reshape(y_val,[-1,1])
y_test = np.reshape(y_test,[-1,1])
# Standardize the dataset
#scalar_x_train = StandardScaler().fit(X_train)
#scalar_x_test = StandardScaler().fit(X_test)
#scalar_x_val = StandardScaler().fit(X_val)
#X_train = scalar_x_train.transform(X_train)
#X_test = scalar_x_test.transform(X_test)
#X_val = scalar_x_val.transform(X_val)
#X_train.dtype
# Standardize the dataset
scalar_x_train = StandardScaler().fit(X_train)
X_train = scalar_x_train.transform(X_train)
X_test = scalar_x_train.transform(X_test)
X_val = scalar_x_train.transform(X_val)
X_train.dtype
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
placeholder_X = tf.placeholder(tf.float32, shape = [None, 19])
placeholder_y = tf.placeholder(tf.float32, shape = [None,1])
#Build an iterator over training batches
#training_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
training_dataset = tf.data.Dataset.from_tensor_slices((placeholder_X, placeholder_y))
#Shuffle the dataset (note shuffle argument much larger than training size).learning_rate
# and form batches of size batch_size
training_batches = training_dataset.shuffle(20000, reshuffle_each_iteration =True).repeat().batch(FLAGS.batch_size)
#training_iterator = tf.data.make_one_shot_iterator(training_batches)
#Building iterator over the heldout set with batch_size = heldout_size,
# i.e., return the entire heldout set as a constant.
val_dataset = tf.data.Dataset.from_tensor_slices((placeholder_X, placeholder_y))
val_batches = val_dataset.repeat().batch(500)
#heldout_iterator = tf.data.make_one_shot_iterator(heldout_batches)
test_dataset = tf.data.Dataset.from_tensor_slices((X_test,y_test))
test_dataset = test_dataset.batch(500)
#Combine these into a feasible iterator that can switch between training
# and validation inputs.
# Here should be minibatch increment be defined
handle = tf.placeholder(tf.string, shape = [])
feedable_iterator = tf.data.Iterator.from_string_handle(handle, training_batches.output_types, training_batches.output_shapes)
features_final, labels_final = feedable_iterator.get_next()
#create Reinitializable iterator for Train and Validation, one hot iterator for Test
train_val_iterator = tf.data.Iterator.from_structure(training_batches.output_types, training_batches.output_shapes)
training_iterator = train_val_iterator.make_initializer(training_batches)
val_iterator = train_val_iterator.make_initializer(val_batches)
test_iterator = test_dataset.make_one_shot_iterator()
def main(argv):
# extract the activation function from the hyperopt spec as an attribute from the tf.nn module
#activation = getattr(tf.nn, FLAGS.activation_function)
# define the graph
#with tf.Graph().as_default():
# Building the Bayesian Neural Network
# we are Gaussian Reparametrization Trick
# to compute the stochastic gradients as described in the paper
with tf.compat.v1.name_scope("bayesian_neural_net", values =[features_final]):
neural_net = tf.keras.Sequential()
for i in range(FLAGS.num_hidden_layers):
layer = tfp.layers.DenseReparameterization(
units = 10,
activation = tf.nn.relu,
trainable = True,
kernel_prior_fn=tfp.layers.default_multivariate_normal_fn, # NormalDiag
kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(),
#kernel_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(), # softplus(sigma)
kernel_posterior_tensor_fn=lambda x: x.sample(),
bias_prior_fn=tfp.layers.default_multivariate_normal_fn, # NormalDiag
bias_posterior_fn=tfp.layers.default_mean_field_normal_fn(), # softplus(sigma)
bias_posterior_tensor_fn=lambda x: x.sample()
)
neural_net.add(layer)
neural_net.add(tfp.layers.DenseReparameterization(
units=2, # one dimensional output
activation= tf.nn.softmax, # since regression (outcome not bounded)
trainable=True, # i.e subject to optimization
kernel_prior_fn=tfp.layers.default_multivariate_normal_fn, # NormalDiag with hyperopt sigma
kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(), # softplus(sigma)
kernel_posterior_tensor_fn=lambda x: x.sample(),
bias_prior_fn =tfp.layers.default_multivariate_normal_fn, # NormalDiag with hyperopt sigma
bias_posterior_fn=tfp.layers.default_mean_field_normal_fn(), # softplus(sigma)
bias_posterior_tensor_fn=lambda x: x.sample()
))
logits = neural_net(features_final)
#labels_distribution = tfd.Bernoulli(logits=logits)
labels_distribution = tfd.Categorical(logits=logits)
#labels_distribution = tfd.Bernoulli(logits=logits)
# Perform KL annealing. The optimal number of annealing steps
# depends on the dataset and architecture.
t = tf.Variable(0.0)
kl_regularizer = t / (FLAGS.kl_annealing * len(X_train) / FLAGS.batch_size)
    #Compute the -ELBO as the loss. The kl term is annealed from 0 to 1 over
# the epochs specified by the kl_annealing flag.
log_likelihood = labels_distribution.log_prob(labels_final)
#neg_log_likelihood = tf.reduce_mean(tf.squared_difference(logits,labels_final))
neg_log_likelihood = -tf.reduce_mean(input_tensor = log_likelihood)
kl = sum(neural_net.losses)/len(X_train) * tf.minimum(1.0, kl_regularizer)
elbo_loss = neg_log_likelihood + kl
# Build metrics for evaluation. Predictions are formed from single forward
# pass of the probablisitic layers . They are cheap but noisy predictions
predictions = tf.argmax(input = logits, axis=1)
predictions = tf.cast(predictions, tf.float32)
# TP, TN, FP, FN
TP = tf.count_nonzero(predictions * labels_final)
TN = tf.count_nonzero((predictions - 1) * (labels_final - 1))
FP = tf.count_nonzero(predictions * (labels_final - 1))
FN = tf.count_nonzero((predictions - 1) * labels_final)
# precision, recall, f1
precision = TP / (TP + FP)
recall = TP / (TP + FN)
f1 = 2 * precision * recall / (precision + recall)
tpr = TP/(TP+FN)
fpr = FP/(TP+FN)
#create Reinitializable iterator for Train and Validation, one hot iterator for Test
train_val_iterator = tf.data.Iterator.from_structure(training_batches.output_types, training_batches.output_shapes)
training_iterator = train_val_iterator.make_initializer(training_batches)
val_iterator = train_val_iterator.make_initializer(val_batches)
test_iterator = test_dataset.make_one_shot_iterator()
with tf.compat.v1.name_scope("train"):
train_accuracy, train_accuracy_update_op = tf.metrics.accuracy(labels=labels_final,predictions =predictions)
opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = opt.minimize(elbo_loss)
update_step_op = tf.assign(t, t+1)
with tf.compat.v1.name_scope("valid"):
valid_accuracy, validation_accuracy_update_op = tf.metrics.accuracy(labels= labels_final,predictions = predictions)
with tf.compat.v1.name_scope("test"):
test_accuracy, test_accuracy_update_op = tf.metrics.accuracy(labels = labels_final,predictions = predictions)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
saver = tf.train.Saver()
stream_vars_valid = [ v for v in tf.local_variables() if "valid" in v.name]
reset_valid_op = tf.variables_initializer(stream_vars_valid)
valid_accuracy_summary = []
stop_early =0
with tf.compat.v1.Session() as sess:
sess.run(init_op)
# Run the training loop
train_val_string, test_string = sess.run([
train_val_iterator.string_handle(),
test_iterator.string_handle()])
training_steps = int(round(FLAGS.epochs * (len(X_train) / FLAGS.batch_size)))
for step in range(training_steps):
#start reininitializable's train iterator
sess.run(training_iterator, feed_dict = {placeholder_X:X_train, placeholder_y:y_train})
#
_ = sess.run([train_op,train_accuracy_update_op, update_step_op],feed_dict={handle: train_val_string})
# Manually print the frequency
if step % 100 == 0:
save_path = saver.save(sess, "/tmp/my_model.ckpt")
loss_value, accuracy_value, kl_value = sess.run([elbo_loss, train_accuracy, kl], feed_dict= {handle: train_val_string})
print("Step:{:>3d} loss : {:.3f} KL: {:.3f}" .format(step , loss_value, accuracy_value, kl_value))
if (step +1) % FLAGS.eval_freq ==0:
# Compute log prob of heldout set by averaging draws from the model:
# p(heldout | train) = int_model p(heldout|model) p(model|train) ~= 1/n * sum_{i=1}^n p(heldout | model_i)
# where model_i is a draw from the posterior
#p(model|train)
probs = np.asarray([sess.run((labels_distribution.probs),
feed_dict ={handle: train_val_string})
for _ in range(FLAGS.num_monte_carlo)])
mean_probs = np.mean(probs, axis =0).astype(np.int32)
print(mean_probs.dtype)
_, label_vals = sess.run((features_final, labels_final), feed_dict = {handle: train_val_string})
label_vals = (label_vals).astype(np.int32)
heldout_lp = np.mean(np.log(mean_probs[np.arange(mean_probs.shape[0]), label_vals]))
print(" ...Held_out nats: {:.3f}".format(heldout_lp))
# Calculate validation accuracy
for step in range(10):
#start reinitializable's validation iterator
sess.run(val_iterator, feed_dict = {placeholder_X:X_val, placeholder_y:y_val})
sess.run(validation_accuracy_update_op, feed_dict={handle:train_val_string})
valid_value = sess.run(valid_accuracy, feed_dict={handle:train_val_string})
valid_accuracy_summary.append(valid_value)
if valid_value < max(valid_accuracy_summary) and step > 100:
stop_early += 1
if stop_early == 40:
break
else:
stop_early = 0
print("Validation Accuracy: {:.3f}".format(valid_value))
sess.run(reset_valid_op)
#Feed to r=feedable iterator the string handle
test_value, precision_value, recall_value, fpr_value, tpr_value,f1 = sess.run([test_accuracy, precision, recall, fpr, tpr,f1],feed_dict={handle: test_string})
print("Step: {:>3d} test Accuracy: {:.3f} Precision: {:.3f} Recall: {:.3f} ".format(step, test_value, precision_value, recall_value))
print("Step: {:>3d} fpr: {:.3f} tpr: {:.3f} f1_1: {:.3f}".format( step, fpr_value, tpr_value,f1))
if __name__ == "__main__":
tf.compat.v1.app.run()
# create the training datasets
dx_train = tf.data.Dataset.from_tensor_slices(X_train)
# apply a one-hot transformation to each label for use in the neural network
dy_train = tf.data.Dataset.from_tensor_slices(y_train)
# zip the x and y training data together and shuffle, batch etc.
train_dataset = tf.data.Dataset.zip((dx_train, dy_train)).shuffle(500).repeat().batch(8)
# do the same operations for the validation set
dx_valid = tf.data.Dataset.from_tensor_slices(X_test)
dy_valid = tf.data.Dataset.from_tensor_slices(y_test)
valid_dataset = tf.data.Dataset.zip((dx_valid, dy_valid)).shuffle(500).repeat().batch(30)
# create general iterator
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
train_dataset.output_shapes)
next_element = iterator.get_next()
# make datasets that we can initialize separately, but using the same structure via the common iterator
training_init_op = iterator.make_initializer(train_dataset)
validation_init_op = iterator.make_initializer(valid_dataset)
def nn_model(in_data):
bn = tf.layers.batch_normalization(in_data)
fc1 = tf.layers.dense(bn, 19)
fc2 = tf.layers.dense(fc1, 8)
fc3 = tf.layers.dense(fc2, 2)
return fc3
# create the neural network model
logits = nn_model(next_element[0])
# add the optimizer and loss
loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(labels=next_element[1], logits=logits))
optimizer = tf.train.AdamOptimizer(1e-8).minimize(loss)
# get accuracy
predictions = tf.argmax(logits, 1)
equality = tf.equal(predictions, tf.argmax(next_element[1], 1))
accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))
#TP, TN, FP, FN
TP = tf.count_nonzero(predictions * y_test, dtype=tf.float32)
TN = tf.count_nonzero((predictions-1)*(y_test-1), dtype=tf.float32)
FP = tf.count_nonzero(predictions*(y_test-1), dtype=tf.float32)
FN = tf.count_nonzero((predictions-1)*y_test, dtype=tf.float32)
#precision, recall, f1
precision = TP / (TP + FP)
recall = TP / (TP + FN)
f1 = 2 * precision * recall / (precision + recall)
tpr = TP/(TP+FN)
fpr = FP/(TP+FN)
init_op = tf.global_variables_initializer()
# run the training
epochs = 600
with tf.Session() as sess:
sess.run(init_op)
sess.run(training_init_op)
for i in range(epochs):
l, _, acc = sess.run([loss, optimizer, accuracy])
if i % 50 == 0:
print("Epoch: {}, loss: {:.3f}, training accuracy: {:.2f}%".format(i, l, acc * 100))
# now setup the validation run
valid_iters = 1000
# re-initialize the iterator, but this time with validation data
sess.run(validation_init_op)
avg_acc = 0
for i in range(valid_iters):
loss_value, valid_accuracy, precision_value, recall_value, fpr_value, tpr_value,f1_value = sess.run([loss, accuracy, precision, recall, fpr, tpr,f1])
print("Step: {:>3d} loss: {:.3f} Validation Accuracy: {:.3f} Precision: {:.3f} Recall: {:.3f} fpr:{:.3f} tpr:{:.3f} f1:{:.3f} ".format(
i, loss_value, valid_accuracy, precision_value, recall_value, fpr_value, tpr_value, f1_value))
acc = sess.run([accuracy])
avg_acc += acc[0]
print("Average validation set accuracy over {} iterations is {:.2f}%".format(valid_iters,(avg_acc ) ))
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, roc_curve
#clf = svm.SVC(kernel='rbf',degree=3, probability=False,tol=0.001)
#clf = RandomForestClassifier(n_estimators=100,random_state=0)
clf = LogisticRegression(C=0.1)
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
print(precision_score(y_test,y_pred))
print( recall_score(y_test,y_pred))
print( f1_score(y_test,y_pred))
print(accuracy_score(y_test, y_pred))
print(fpr, tpr)
###Output
_____no_output_____ |
sphinx/example_regression.ipynb | ###Markdown
Example RegressionIn this notebook we show how to use lit-saint for a regression problem. We will use the "California housing" dataset, in which the objective is to predict the median house value for households within a block (measured in US dollars). Import libraries
###Code
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from lit_saint import Saint, SaintConfig, SaintDatamodule, SaintTrainer
from pytorch_lightning import Trainer, seed_everything
###Output
_____no_output_____
###Markdown
Download Data
###Code
df = fetch_california_housing(as_frame=True)
###Output
_____no_output_____
###Markdown
Configure lit-saint
###Code
# if you want to use the default values for the parameters
cfg = SaintConfig()
# otherwise you can use hydra to read a config file (uncomment the following part)
# from hydra.core.config_store import ConfigStore
# cs = ConfigStore.instance()
# cs.store(name="base_config", node=SaintConfig)
# with initialize(config_path="."):
# cfg = compose(config_name="config")
###Output
_____no_output_____
###Markdown
Prepare Data
###Code
seed_everything(42, workers=True)
df = df.frame
df_train, df_test = train_test_split(df, test_size=0.10, random_state=42)
df_train, df_val = train_test_split(df_train, test_size=0.10, random_state=42)
df_train["split"] = "train"
df_val["split"] = "validation"
df = pd.concat([df_train, df_val])
# The target is in the column MedHouseVal and we can see that it contains floats, so the library will treat the problem as a regression
df.head()
###Output
Global seed set to 42
###Markdown
Fit the model
###Code
data_module = SaintDatamodule(df=df, target="MedHouseVal", split_column="split",
num_workers=cfg.network.num_workers)
model = Saint(categories=data_module.categorical_dims, continuous=data_module.numerical_columns,
config=cfg, dim_target=data_module.dim_target)
pretrainer = Trainer(max_epochs=cfg.pretrain.epochs)
trainer = Trainer(max_epochs=10)
saint_trainer = SaintTrainer(pretrainer=pretrainer, trainer=trainer)
saint_trainer.fit(model=model, datamodule=data_module, enable_pretraining=True)
###Output
_____no_output_____
###Markdown
Make predictions
###Code
prediction = saint_trainer.predict(model=model, datamodule=data_module, df=df_test)
df_test["prediction"] = prediction
expl_variance = explained_variance_score(df_test["MedHouseVal"], df_test["prediction"])
mae = mean_absolute_error(df_test["MedHouseVal"], df_test["prediction"])
mse = mean_squared_error(df_test["MedHouseVal"], df_test["prediction"])
print(f"Explained Variance: {expl_variance} MAE: {mae} MSE: {mse}")
###Output
Explained Variance: 0.6835245941402444 MAE: 0.4766954095214282 MSE: 0.42516899954601584
###Markdown
Uncertainty Estimation
###Code
mc_prediction = saint_trainer.predict(model=model, datamodule=data_module, df=df_test, mc_dropout_iterations=4)
mc_prediction
# Given the predictions we can compute the variance across the iterations, so axis=2
var_prediction = np.var(mc_prediction,axis=2)
# Then we focus our attention on the variance of the first class
pd.DataFrame(var_prediction[:,0], columns=["variance"]).hist()
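# Added example: the mean over the MC-dropout iterations can serve as the
# point prediction that goes with the variance computed above
mean_prediction = np.mean(mc_prediction, axis=2)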
###Output
_____no_output_____ |
Chapter_Preprocessing/MultivariateLinearRegression_VSdata.ipynb | ###Markdown
Chapter: Data Preprocessing Topic: Implementing MLR on simulated process data
###Code
# read data
import numpy as np
VSdata = np.loadtxt('VSdata.csv', delimiter=',')
VSdata_val = np.loadtxt('VSdata_val.csv', delimiter=',')
# separate X and y
y_train = VSdata[:,0]
X_train = VSdata[:,1:]
y_val = VSdata_val[:,0]
X_val = VSdata_val[:,1:]
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## MLR using all variables
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# fit model on training data
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
# scale X
scaler_all = StandardScaler()
X_train_scaled = scaler_all.fit_transform(X_train)
X_val_scaled = scaler_all.transform(X_val)
# fit
MLR_all = LinearRegression().fit(X_train_scaled, y_train)
# predict
y_val_pred = MLR_all.predict(X_val_scaled)
# score
R2_all_train = MLR_all.score(X_train_scaled, y_train)
R2_all = MLR_all.score(X_val_scaled, y_val)
print(R2_all)
# plot raw vs predicted target
plt.figure()
plt.plot(y_val, y_val_pred, '.')
plt.title('Using all variables')
plt.xlabel('observed y (validation data)')
plt.ylabel('predicted y (validation data)')
plt.show()
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## MLR using only 10 relevant variables
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# select only relevant inputs
k = 10
X_train_rel = X_train[:,16:16+k]
X_val_rel = X_val[:,16:16+k]
# scale X
scaler_rel = StandardScaler()
X_train_rel_scaled = scaler_rel.fit_transform(X_train_rel)
X_val_rel_scaled = scaler_rel.transform(X_val_rel)
# fit
MLR_rel = LinearRegression().fit(X_train_rel_scaled, y_train)
# predict
y_val_rel_pred = MLR_rel.predict(X_val_rel_scaled)
# score
R2_rel_train = MLR_rel.score(X_train_rel_scaled, y_train)
R2_rel = MLR_rel.score(X_val_rel_scaled, y_val)
print(R2_rel)
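# Side-by-side comparison of the two fits (an aside, not in the original notebook)
print(f"Validation R2 - all variables: {R2_all:.3f}, relevant variables only: {R2_rel:.3f}")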
# plot raw vs predicted target
plt.figure()
plt.plot(y_val, y_val_rel_pred, '.')
plt.title('Using relevant variables')
plt.xlabel('observed y (validation data)')
plt.ylabel('predicted y (validation data)')
plt.show()
###Output
0.6339710202313695
|
Heart Failure Prediction .ipynb | ###Markdown
Risk of death by gender (0=Female, 1=Male)
###Code
df_good = df[df["DEATH_EVENT"] == 0]
df_bad = df[df["DEATH_EVENT"] == 1]
fig, ax = plt.subplots(figsize=(12,8))
ax = sns.countplot(x="sex", hue="DEATH_EVENT", data=df)
fig.tight_layout
###Output
_____no_output_____
###Markdown
Risk of death due to anemia (0=No Anemia, 1=Anemia)
###Code
df_good = df[df["DEATH_EVENT"] == 0]
df_bad = df[df["DEATH_EVENT"] == 1]
fig, ax = plt.subplots(figsize=(12,8))
ax = sns.countplot(x="anaemia", hue="DEATH_EVENT", data=df)
fig.tight_layout
###Output
_____no_output_____
###Markdown
Risk of death due to diabetes (0=No diabetes, 1=diabetes)
###Code
df_good = df[df["DEATH_EVENT"] == 0]
df_bad = df[df["DEATH_EVENT"] == 1]
fig, ax = plt.subplots(figsize=(12,8))
ax = sns.countplot(x="diabetes", hue="DEATH_EVENT", data=df)
fig.tight_layout
###Output
_____no_output_____
###Markdown
Risk of death due to high blood pressure (0=No high blood pressure, 1=high blood pressure)
###Code
df_good = df[df["DEATH_EVENT"] == 0]
df_bad = df[df["DEATH_EVENT"] == 1]
fig, ax = plt.subplots(figsize=(12,8))
ax = sns.countplot(x="high_blood_pressure", hue="DEATH_EVENT", data=df)
fig.tight_layout
###Output
_____no_output_____
###Markdown
Risk of death due to smoking (0=Non smoker, 1=smoker)
###Code
df_good = df[df["DEATH_EVENT"] == 0]
df_bad = df[df["DEATH_EVENT"] == 1]
fig, ax = plt.subplots(figsize=(12,8))
ax = sns.countplot(x="smoking", hue="DEATH_EVENT", data=df)
fig.tight_layout
###Output
_____no_output_____
###Markdown
Age distribution of the dataset
###Code
sns.set_style('whitegrid')
df['age'].hist(bins=35)
plt.xlabel('Age')
sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# we have no blanks or nulls in the dataset; too good to be true
df
###Output
_____no_output_____
###Markdown
We one-hot encode the binary features; this is not strictly required here, but it is good practice. An equivalent one-line alternative is shown as a comment in the code below.
###Code
anaemia1 = pd.get_dummies(df['anaemia'],drop_first=True)
diabetes1 = pd.get_dummies(df['diabetes'],drop_first=True)
high_blood_pressure1 = pd.get_dummies(df['high_blood_pressure'],drop_first=True)
sex1 = pd.get_dummies(df['sex'],drop_first=True)
smoking1 = pd.get_dummies(df['smoking'],drop_first=True)
df.drop(['anaemia', 'diabetes', 'high_blood_pressure', 'sex','smoking'],axis=1,inplace=True)
df = pd.concat([df,anaemia1,diabetes1,high_blood_pressure1,sex1,smoking1],axis=1)
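# Aside (not in the original notebook): pandas can one-hot encode several
# columns in a single call, equivalent to the manual steps above:
# df = pd.get_dummies(df, columns=['anaemia', 'diabetes', 'high_blood_pressure', 'sex', 'smoking'], drop_first=True)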
df.head()
###Output
_____no_output_____
###Markdown
Using a simple logistic model
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df.drop('DEATH_EVENT',axis=1),
df['DEATH_EVENT'], test_size=0.33,
random_state=120)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train,y_train)
predictions = lr.predict(X_test)
predictions
from sklearn.metrics import classification_report, accuracy_score,confusion_matrix
###Output
_____no_output_____
###Markdown
Not bad for starters
###Code
print(classification_report(y_test,predictions))
print('Accuracy score:', accuracy_score(y_test,predictions))
print('\n confusion_matrix:', confusion_matrix(y_test,predictions))
###Output
precision recall f1-score support
0 0.88 0.91 0.90 67
1 0.80 0.75 0.77 32
accuracy 0.86 99
macro avg 0.84 0.83 0.84 99
weighted avg 0.86 0.86 0.86 99
Accuracy score: 0.8585858585858586
confusion_matrix: [[61 6]
[ 8 24]]
###Markdown
Grid search is done to get the best parameters for the current model
###Code
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
param_grid = {'C': [0.7,0.9,1,10,100,1000], 'max_iter': [50,100,105,110,120,1000]}
grid = GridSearchCV(lr,param_grid,verbose=3)
grid.fit(X_train,y_train)
grid.best_params_
grid.best_estimator_
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test,grid_predictions))
print(classification_report(y_test,grid_predictions))
###Output
precision recall f1-score support
0 0.89 0.94 0.91 67
1 0.86 0.75 0.80 32
accuracy 0.88 99
macro avg 0.87 0.85 0.86 99
weighted avg 0.88 0.88 0.88 99
|
sklearn/sklearn learning/demonstration/auto_examples_jupyter/classification/plot_lda_qda.ipynb | ###Markdown
Linear and Quadratic Discriminant Analysis with covariance ellipsoidThis example plots the covariance ellipsoids of each class and the decision boundary learned by LDA and QDA. The ellipsoids display the double standard deviation for each class. With LDA, the standard deviation is the same for all the classes, while each class has its own standard deviation with QDA.
###Code
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# #############################################################################
# Colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
# #############################################################################
# Generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
# #############################################################################
# Plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with\n fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with\n varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
# class 0: dots
plt.scatter(X0_tp[:, 0], X0_tp[:, 1], marker='.', color='red')
plt.scatter(X0_fp[:, 0], X0_fp[:, 1], marker='x',
s=20, color='#990000') # dark red
# class 1: dots
plt.scatter(X1_tp[:, 0], X1_tp[:, 1], marker='.', color='blue')
plt.scatter(X1_fp[:, 0], X1_fp[:, 1], marker='x',
s=20, color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.), zorder=0)
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='white')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'*', color='yellow', markersize=15, markeredgecolor='grey')
plt.plot(lda.means_[1][0], lda.means_[1][1],
'*', color='yellow', markersize=15, markeredgecolor='grey')
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, facecolor=color,
edgecolor='black', linewidth=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.2)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariance_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariance_[1], 'blue')
plt.figure(figsize=(10, 8), facecolor='white')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis',
y=0.98, fontsize=15)
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariance=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.tight_layout()
plt.subplots_adjust(top=0.92)
plt.show()
###Output
_____no_output_____ |
ICCT_it/examples/02/.ipynb_checkpoints/TD-16-PID-controller-Sistema-in-anello-chiuso-checkpoint.ipynb | ###Markdown
PID Controller - Closed-Loop SystemA proportional-integral-derivative (PID) control algorithm is by far the most common control algorithm. Its transfer function is:\begin{equation} P(s)=K_p \cdot \left( 1 + \frac{1}{T_i s} + T_d s \right).\end{equation}It consists of the sum of the proportional, integral and derivative channels. Not all of them have to be present, so P, PI or PD control algorithms are also used. This example shows the response of a P, PI, PD or PID controller to a unit step, unit impulse, unit ramp or sinusoidal input. Here the controller is part of a closed-loop control system. The system to be controlled can be a proportional system, a first-order system, an integrator, or an integrator with a stable pole.The following plots show:1. The closed-loop system response to the selected input with the selected system and controller (left figure).2. The location of the zeros and poles of the resulting closed-loop transfer function.--- How to use this notebook?1. Toggle between *step*, *impulse*, *ramp* and *sinusoid* to select the input signal.2. Click *P0*, *P1*, *I0* or *I1* to switch between the following systems: proportional, first-order, integrator, or integrator with a stable pole. The transfer function of P0 is $k_p$ (in this example $k_p=2$), that of P1 is $\frac{k_p}{\tau s+1}$ (in this example $k_p=1$ and $\tau=2$), that of I0 is $\frac{k_i}{s}$ (in this example $k_i=\frac{1}{10}$) and that of I1 is $\frac{k_i}{s(\tau s +1) }$ (in this example $k_i=1$ and $\tau=10$).3. Click the *P*, *PI*, *PD* and *PID* buttons to choose a proportional, proportional-integral, proportional-derivative or proportional-integral-derivative controller.4. Move the sliders to change the values of the proportional ($K_p$), integral ($T_i$) and derivative ($T_d$) PID coefficients.5. Move the $t_{max}$ slider to change the maximum time on the x-axis of the response plot.
###Code
A = 10
a=0.1
s, P, I, D = sym.symbols('s, P, I, D')
obj = 1/(A*s)
PID = P + P/(I*s) + P*D*s#/(a*D*s+1)
system = obj*PID/(1+obj*PID)
num = [sym.fraction(system.factor())[0].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system.factor())[0], gen=s)))]
den = [sym.fraction(system.factor())[1].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system.factor())[1], gen=s)))]
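# num/den now hold the numerator and denominator polynomial coefficients of the
# closed-loop transfer function (highest power of s first), still symbolic in P, I, D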
# make figure
fig = plt.figure(figsize=(9.8, 4),num='PID controller - closed-loop system')
plt.subplots_adjust(wspace=0.3)
# add axes
ax = fig.add_subplot(121)
ax.grid(which='both', axis='both', color='lightgray')
ax.set_title('Response')
ax.set_xlabel('$t$ [s]')
ax.set_ylabel('input, output')
ax.axhline(linewidth=.5, color='k')
ax.axvline(linewidth=.5, color='k')
rlocus = fig.add_subplot(122)
input_type = 'step'
# plot step function and responses (initalisation)
input_plot, = ax.plot([],[],'C0', lw=1, label='input')
response_plot, = ax.plot([],[], 'C1', lw=2, label='output')
ax.legend()
rlocus_plot, = rlocus.plot([], [], 'r')
plt.show()
def update_plot(KP, TI, TD, Time_span):
global num, den, input_type
num_temp = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in num]
den_temp = [float(i.subs(P,KP).subs(I,TI).subs(D,TD)) for i in den]
system = signal.TransferFunction(num_temp, den_temp)
zeros = np.roots(num_temp)
poles = np.roots(den_temp)
rlocus.clear()
rlocus.scatter([np.real(i) for i in poles], [np.imag(i) for i in poles], marker='x', color='g', label='pole')
rlocus.scatter([np.real(i) for i in zeros], [np.imag(i) for i in zeros], marker='o', color='g', label='zero')
rlocus.set_title('Pole-zero map')
rlocus.set_xlabel('Re')
rlocus.set_ylabel('Im')
rlocus.grid(which='both', axis='both', color='lightgray')
time = np.linspace(0, Time_span, 300)
if input_type == 'step':
u = np.ones_like(time)
u[0] = 0
time, response = signal.step(system, T=time)
elif input_type == 'impulse':
u = np.zeros_like(time)
u[0] = 10
time, response = signal.impulse(system, T=time)
elif input_type == 'sinusoid':
u = np.sin(time*2*np.pi)
time, response, _ = signal.lsim(system, U=u, T=time)
elif input_type == 'ramp':
u = time
time, response, _ = signal.lsim(system, U=u, T=time)
else:
raise Exception("Program error. Restart the simulation.")
response_plot.set_data(time, response)
input_plot.set_data(time, u)
rlocus.axhline(linewidth=.3, color='k')
rlocus.axvline(linewidth=.3, color='k')
rlocus.legend()
ax.set_ylim([min([np.min(u), min(response),-.1]),min(100,max([max(response)*1.05, 1, 1.05*np.max(u)]))])
ax.set_xlim([-0.1,max(time)])
plt.show()
controller_ = PID
object_ = obj
def calc_tf():
global num, den, controller_, object_
system_func = object_*controller_/(1+object_*controller_)
num = [sym.fraction(system_func.factor())[0].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func.factor())[0], gen=s)))]
den = [sym.fraction(system_func.factor())[1].expand().coeff(s, i) for i in reversed(range(1+sym.degree(sym.fraction(system_func.factor())[1], gen=s)))]
update_plot(Kp_widget.value, Ti_widget.value, Td_widget.value, time_span_widget.value)
def transfer_func(controller_type):
global controller_
proportional = P
integral = P/(I*s)
differential = P*D*s/(a*D*s+1)
if controller_type =='P':
controller_func = proportional
Kp_widget.disabled=False
Ti_widget.disabled=True
Td_widget.disabled=True
elif controller_type =='PI':
controller_func = proportional+integral
Kp_widget.disabled=False
Ti_widget.disabled=False
Td_widget.disabled=True
elif controller_type == 'PD':
controller_func = proportional+differential
Kp_widget.disabled=False
Ti_widget.disabled=True
Td_widget.disabled=False
else:
controller_func = proportional+integral+differential
Kp_widget.disabled=False
Ti_widget.disabled=False
Td_widget.disabled=False
controller_ = controller_func
calc_tf()
def transfer_func_obj(object_type):
global object_
if object_type == 'P0':
object_ = 2
elif object_type == 'P1':
object_ = 1/(2*s+1)
elif object_type == 'I0':
object_ = 1/(10*s)
elif object_type == 'I1':
object_ = 1/(s*(10*s+1))
calc_tf()
style = {'description_width': 'initial'}
def buttons_controller_clicked(event):
controller = buttons_controller.options[buttons_controller.index]
transfer_func(controller)
buttons_controller = widgets.ToggleButtons(
options=['P', 'PI', 'PD', 'PID'],
description='Controller:',
disabled=False,
style=style)
buttons_controller.observe(buttons_controller_clicked)
def buttons_object_clicked(event):
object_ = buttons_object.options[buttons_object.index]
transfer_func_obj(object_)
buttons_object = widgets.ToggleButtons(
options=['P0', 'P1', 'I0', 'I1'],
description='System:',
disabled=False,
style=style)
buttons_object.observe(buttons_object_clicked)
def buttons_input_clicked(event):
global input_type
input_type = buttons_input.options[buttons_input.index]
update_plot(Kp_widget.value, Ti_widget.value, Td_widget.value, time_span_widget.value)
buttons_input = widgets.ToggleButtons(
options=['step','impulse', 'ramp', 'sinusoid'],
description='Input:',
disabled=False,
style=style)
buttons_input.observe(buttons_input_clicked)
Kp_widget = widgets.IntSlider(value=10,min=1,max=50,step=1,description=r'\(K_p\)',
disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.1d')
Ti_widget = widgets.FloatLogSlider(value=1.,min=-3,max=1.1,step=.001,description=r'\(T_{i} \)',
disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.3f')
Td_widget = widgets.FloatLogSlider(value=1.,min=-3,max=1.1,step=.001,description=r'\(T_{d} \)',
disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.3f')
time_span_widget = widgets.FloatSlider(value=10.,min=.5,max=50.,step=0.1,description=r'\(t_{max} \)',
disabled=False,continuous_update=True,orientation='horizontal',readout=True,readout_format='.1f')
transfer_func(buttons_controller.options[buttons_controller.index])
transfer_func_obj(buttons_object.options[buttons_object.index])
display(buttons_input)
display(buttons_object)
display(buttons_controller)
interact(update_plot, KP=Kp_widget, TI=Ti_widget, TD=Td_widget, Time_span=time_span_widget);
###Output
_____no_output_____ |
docs/howto/convert-original-code.ipynb | ###Markdown
Original NYC Taxi ML Notebook
###Code
%pip install lightgbm shapely
import numpy as np
import pandas as pd
import scipy as scipy
import datetime as dt
from sklearn.model_selection import train_test_split
import lightgbm as lgbm
import os
import gc
import shapely.wkt
from io import StringIO
def clean_df(df):
return df[(df.fare_amount > 0) & (df.fare_amount <= 500) &
(df.PULocationID > 0) & (df.PULocationID <= 263) &
(df.DOLocationID > 0) & (df.DOLocationID <= 263)]
def radian_conv(degree):
"""
Return radian.
"""
return np.radians(degree)
# To Compute Haversine distance
def sphere_dist(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
"""
Return distance along great radius between pickup and dropoff coordinates.
"""
#Define earth radius (km)
R_earth = 6371
#Convert degrees to radians
pickup_lat, pickup_lon, dropoff_lat, dropoff_lon = map(np.radians,
[pickup_lat, pickup_lon,
dropoff_lat, dropoff_lon])
#Compute distances along lat, lon dimensions
dlat = dropoff_lat - pickup_lat
dlon = dropoff_lon - pickup_lon
#Compute haversine distance
a = np.sin(dlat/2.0)**2 + np.cos(pickup_lat) * np.cos(dropoff_lat) * np.sin(dlon/2.0)**2
return 2 * R_earth * np.arcsin(np.sqrt(a))
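# Quick sanity check (an aside, not in the original notebook): JFK to Newark
# is roughly 33 km along a great circle.
# print(sphere_dist(40.639722, -73.778889, 40.6925, -74.168611))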
def add_airport_dist(dataset):
"""
Return minimum distance from pickup or dropoff coordinates to each airport.
JFK: John F. Kennedy International Airport
EWR: Newark Liberty International Airport
LGA: LaGuardia Airport
SOL: Statue of Liberty
NYC: New York Central
"""
jfk_coord = (40.639722, -73.778889)
ewr_coord = (40.6925, -74.168611)
lga_coord = (40.77725, -73.872611)
sol_coord = (40.6892,-74.0445) # Statue of Liberty
nyc_coord = (40.7141667,-74.0063889)
pickup_lat = dataset['pickup_latitude']
dropoff_lat = dataset['dropoff_latitude']
pickup_lon = dataset['pickup_longitude']
dropoff_lon = dataset['dropoff_longitude']
pickup_jfk = sphere_dist(pickup_lat, pickup_lon, jfk_coord[0], jfk_coord[1])
dropoff_jfk = sphere_dist(jfk_coord[0], jfk_coord[1], dropoff_lat, dropoff_lon)
pickup_ewr = sphere_dist(pickup_lat, pickup_lon, ewr_coord[0], ewr_coord[1])
dropoff_ewr = sphere_dist(ewr_coord[0], ewr_coord[1], dropoff_lat, dropoff_lon)
pickup_lga = sphere_dist(pickup_lat, pickup_lon, lga_coord[0], lga_coord[1])
dropoff_lga = sphere_dist(lga_coord[0], lga_coord[1], dropoff_lat, dropoff_lon)
pickup_sol = sphere_dist(pickup_lat, pickup_lon, sol_coord[0], sol_coord[1])
dropoff_sol = sphere_dist(sol_coord[0], sol_coord[1], dropoff_lat, dropoff_lon)
pickup_nyc = sphere_dist(pickup_lat, pickup_lon, nyc_coord[0], nyc_coord[1])
dropoff_nyc = sphere_dist(nyc_coord[0], nyc_coord[1], dropoff_lat, dropoff_lon)
dataset['jfk_dist'] = pickup_jfk + dropoff_jfk
dataset['ewr_dist'] = pickup_ewr + dropoff_ewr
dataset['lga_dist'] = pickup_lga + dropoff_lga
dataset['sol_dist'] = pickup_sol + dropoff_sol
dataset['nyc_dist'] = pickup_nyc + dropoff_nyc
return dataset
def add_datetime_info(dataset):
#Convert to datetime format
dataset['pickup_datetime'] = pd.to_datetime(dataset['tpep_pickup_datetime'],format="%Y-%m-%d %H:%M:%S")
dataset['hour'] = dataset.pickup_datetime.dt.hour
dataset['day'] = dataset.pickup_datetime.dt.day
dataset['month'] = dataset.pickup_datetime.dt.month
dataset['weekday'] = dataset.pickup_datetime.dt.weekday
dataset['year'] = dataset.pickup_datetime.dt.year
return dataset
def get_zones_dict(zones_url):
zones_df = pd.read_csv(zones_url)
# Remove unnecessary fields
zones_df.drop(['Shape_Leng', 'Shape_Area', 'zone', 'LocationID', 'borough'], axis=1, inplace=True)
# Convert DF to dictionary
zones_dict = zones_df.set_index('OBJECTID').to_dict('index')
# Add lat/long to each zone
for zone in zones_dict:
shape = shapely.wkt.loads(zones_dict[zone]['the_geom'])
zones_dict[zone]['long'] = shape.centroid.x
zones_dict[zone]['lat'] = shape.centroid.y
return zones_dict
def get_zone_lat(zones_dict, zone_id):
return zones_dict[zone_id]['lat']
def get_zone_long(zones_dict, zone_id):
return zones_dict[zone_id]['long']
zones_dict = get_zones_dict('https://s3.wasabisys.com/iguazio/data/Taxi/taxi_zones.csv')
new_train_df = pd.read_csv('https://s3.wasabisys.com/iguazio/data/Taxi/yellow_tripdata_2019-01_subset.csv')
new_train_df = clean_df(new_train_df)
# This can take a minute or two
new_train_df['pickup_latitude'] = new_train_df.apply(lambda x: get_zone_lat(zones_dict, x['PULocationID']), axis=1 )
new_train_df['pickup_longitude'] = new_train_df.apply(lambda x: get_zone_long(zones_dict, x['PULocationID']), axis=1 )
new_train_df['dropoff_latitude'] = new_train_df.apply(lambda x: get_zone_lat(zones_dict, x['DOLocationID']), axis=1 )
new_train_df['dropoff_longitude'] = new_train_df.apply(lambda x: get_zone_long(zones_dict, x['DOLocationID']), axis=1 )
new_train_df = add_datetime_info(new_train_df)
new_train_df = add_airport_dist(new_train_df)
new_train_df['pickup_latitude'] = radian_conv(new_train_df['pickup_latitude'])
new_train_df['pickup_longitude'] = radian_conv(new_train_df['pickup_longitude'])
new_train_df['dropoff_latitude'] = radian_conv(new_train_df['dropoff_latitude'])
new_train_df['dropoff_longitude'] = radian_conv(new_train_df['dropoff_longitude'])
y = new_train_df['fare_amount']
# Remove unnecessary fields
new_train_df.drop(['VendorID', 'tpep_pickup_datetime', 'tpep_dropoff_datetime', 'congestion_surcharge', 'improvement_surcharge', 'pickup_datetime',
'extra', 'mta_tax', 'tip_amount', 'tolls_amount', 'total_amount', 'RatecodeID', 'store_and_fwd_flag',
'PULocationID', 'DOLocationID', 'payment_type', 'fare_amount'],
axis=1, inplace=True, errors='ignore')
x_train,x_test,y_train,y_test = train_test_split(new_train_df,y,random_state=123,test_size=0.10)
del new_train_df
del y
gc.collect()
params = {
'boosting_type':'gbdt',
'objective': 'regression',
'nthread': 4,
'num_leaves': 31,
'learning_rate': 0.05,
'max_depth': -1,
'subsample': 0.8,
'bagging_fraction' : 1,
'max_bin' : 5000 ,
'bagging_freq': 20,
'colsample_bytree': 0.6,
'metric': 'rmse',
'min_split_gain': 0.5,
'min_child_weight': 1,
'min_child_samples': 10,
'scale_pos_weight':1,
'zero_as_missing': True,
'seed':0,
'num_rounds':50000
}
train_set = lgbm.Dataset(x_train, y_train, silent=False,categorical_feature=['year','month','day','weekday'])
valid_set = lgbm.Dataset(x_test, y_test, silent=False,categorical_feature=['year','month','day','weekday'])
model = lgbm.train(params, train_set = train_set, num_boost_round=10000,early_stopping_rounds=500,verbose_eval=500, valid_sets=valid_set)
del x_train
del y_train
del x_test
del y_test
gc.collect()
test_data = StringIO("""
passenger_count,trip_distance,pickup_latitude,pickup_longitude,dropoff_latitude,dropoff_longitude,hour,day,month,weekday,year,jfk_dist,ewr_dist,lga_dist,sol_dist,nyc_dist
1,0.80,0.711950,-1.291073,0.712059,1.290988,13,1,1,1,2019,47.274013,40.386065,16.975747,26.587155,18.925788
""")
test_df = pd.read_csv(test_data, sep=",")
test_df
#Predict from test set
prediction = model.predict(test_df, num_iteration = model.best_iteration)
print(prediction)
###Output
[9.53629134]
|
opends4all-resources/opends4all-scalable-data-processing/temp/EFFICIENT-DATA-PROCESSING-architecture-algorithms-intermediate.ipynb | ###Markdown
Lecture Notebook: Making Choices about Data Representation and Processing LinkedIn Social AnalysisThis module explores concepts in:* Designing data representations to capture important relationships* Reasoning over graphs* Exploring and traversing graphs* Performance implications of design choices* Techniques for indexing, parallelism, and sequenceIt sets the stage understanding cloud/cluster-compute (parallel) data processing.
###Code
!pip install pymongo[tls,srv]
!pip install swifter
!pip install lxml
import pandas as pd
import numpy as np
# JSON parsing
import json
# HTML parsing
from lxml import etree
import urllib
# SQLite RDBMS
import sqlite3
# Time conversions
import time
# Parallel processing
import swifter
# NoSQL DB
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError, OperationFailure
import os
import zipfile
###Output
_____no_output_____
###Markdown
Part A: Getting the DataWe use synthetic LinkedIn data to test this notebook. The Lecture Notebook on Modeling Data and Knowledge shows how to get and process the data; we wrap those steps in the script 'module2dataloading.py' for you to use.
###Code
# Getting the data processing script, which was covered in the modelling data module.
# url = 'https://XXX/module2dataloading.py'
# urllib.request.urlretrieve(url,filename='module2dataloading.py')
# Also, get the linkedin data.
# url = 'X'
# filehandle, _ = urllib.request.urlretrieve(url,filename='local.zip')
# def fetch_file(fname):
# zip_file_object = zipfile.ZipFile(filehandle, 'r')
# for file in zip_file_object.namelist():
# file = zip_file_object.open(file)
# if file.name == fname: return file
# return None
# linkedin_small = fetch_file('linkedin_small.json')# 100K records
# # note that linkedin_tiny.json has a bug. Do not use!
from module2dataloading import *
import importlib
# If you use Colab and want to mount Google Drive as the 'local' folder, run this cell.
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# To load the data locally, use the open() function.
data_loading(file=open('/content/drive/My Drive/Colab Notebooks/test_data_10000.json'), dbname='linkedin.db', filetype='localobj', LIMIT=20000)
###Output
10000
###Markdown
Part B: Big Data Takes a Long Time to ProcessThis dataset is very big, and processing it may take a long time depending on how the processing is performed. We'll explore this, and see how we can improve performance. Then we'll see how an SQL database automatically finds good ways to execute queries.
###Code
%%time
# 10,000 records from linkedin
# Note that we are loading all the data into a dataframe first, then selecting the rows we want.
# linked_in = fetch_file('linkedin_small.json')
linked_in = open('/content/drive/My Drive/Colab Notebooks/test_data_10000.json')
people = []
i=1
for line in linked_in:
person = json.loads(line)
people.append(person)
if(i % 10000==0):
print(i)
if(i == 20000):
break
i += 1
people_df = pd.DataFrame(people)
people_df[people_df['industry'] == 'Medical Devices']
%%time
# 10,000 records from linkedin
# Note that we are selecting the rows we want as we load the data into a dataframe.
# linked_in = fetch_file('linkedin_small.json')
linked_in = open('/content/drive/My Drive/Colab Notebooks/test_data_10000.json')
people = []
i = 1
for line in linked_in:
person = json.loads(line)
if 'industry' in person and person['industry'] == 'Medical Devices':
people.append(person)
if(i % 10000 == 0):
print(i)
if( i == 20000):
break
i += 1
people_df = pd.DataFrame(people)
people_df
###Output
10000
CPU times: user 908 ms, sys: 53.2 ms, total: 961 ms
Wall time: 967 ms
###Markdown
SQL query without an indexIn the above, we rewrote the processing to perform the filter (industry is Medical Devices) early. However, SQL databases will automatically "push down" selection and projection where feasible. They also don't need to parse the data. Here we assume that the data is already in a relational database (so it is not a head-to-head comparison with the above).
###Code
conn = sqlite3.connect('linkedin.db')
## This is just to reset things so we don't have an index
conn.execute('begin transaction')
conn.execute('drop index if exists people_industry')
conn.execute('commit')
%%time
pd.read_sql_query('select * from people where industry="Medical Devices"', conn)
###Output
CPU times: user 7.02 ms, sys: 12.1 ms, total: 19.1 ms
Wall time: 21.6 ms
###Markdown
Let's build an index now...To speed up the SQL query processing, we can build an index.
###Code
conn = sqlite3.connect('linkedin.db')
conn.execute('begin transaction')
conn.execute('drop index if exists people_industry')
conn.execute("create index people_industry on people(industry)")
conn.execute('commit')
%%time
# Re-run the same query; the index should make it faster
pd.read_sql_query('select * from people where industry="Medical Devices"', conn)
# In our tests, this was 5x faster!
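# Aside (not in the original notebook): SQLite can report whether the index is
# actually used via EXPLAIN QUERY PLAN; look for "USING INDEX people_industry"
# in the detail column.
print(pd.read_sql_query('explain query plan select * from people where industry="Medical Devices"', conn))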
conn = sqlite3.connect('linkedin.db')
people_df = pd.read_sql_query('select * from people limit 500', conn)
experience_df = pd.read_sql_query('select * from experience limit 5000', conn)
skills_df = pd.read_sql_query('select * from skills limit 8000', conn)
print ("%d people"%len(people_df))
print ("%d experiences"%len(experience_df))
print ("%d skills"%len(skills_df))
# Implement a dataframe merge in Python.
def merge(S,T,l_on,r_on):
ret = pd.DataFrame()
count = 0
for s_index in range(0, len(S)):
for t_index in range(0, len(T)):
count = count + 1
if S.loc[s_index, l_on] == T.loc[t_index, r_on]:
ret = ret.append(S.loc[s_index].append(T.loc[t_index].drop(labels=r_on)), ignore_index=True)
print('Merge compared %d tuples'%count)
return ret
%%time
# Here's a test join, with people and their experiences. We can see how many
# comparisons are made
merge(people_df, experience_df, '_id', 'person')
# Let's find all people (by ID) who have Marketing as a skill
mktg_df = skills_df[skills_df['value'] == 'Marketing'].reset_index()[['person']]
mktg_df
%%time
# Test differences in join order (Part 1)
merge(merge(people_df, experience_df, '_id', 'person'), mktg_df, '_id', 'person')
%%time
# Test differences in join order (Part 2)
merge(merge(people_df, mktg_df, '_id', 'person'), experience_df, '_id', 'person')
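# Joining people with the small Marketing list first shrinks the intermediate
# result, so the second ordering performs far fewer tuple comparisons. This is
# the same selectivity-based reasoning a query optimizer applies automatically.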
experience_df.loc[0].drop(labels='person')
%%time
# Slide 21
conn.execute('drop view if exists people500')
conn.execute('drop view if exists experience5000')
conn.execute('drop view if exists skills8000')
conn.execute('create view people500 as select * from people limit 500')
conn.execute('create view experience5000 as select * from experience limit 5000')
conn.execute('create view skills8000 as select * from skills limit 8000')
pd.read_sql_query('select * from (people500 join skills8000 on _id=person) ps join ' + \
'experience5000 ex on ps._id=ex.person and value="Marketing"', conn)
# Join using a *map*, which is a kind of in-memory index
# from keys to (single) values
def merge_map(S,T,l_on,r_on):
ret = pd.DataFrame()
T_map = {}
count = 0
# Take each value in the r_on field, and
# make a map entry for it
for t_index in range(0, len(T)):
# Make sure we aren't overwriting an entry!
assert (T.loc[t_index,r_on] not in T_map)
T_map[T.loc[t_index,r_on]] = T.loc[t_index]
count = count + 1
# Now find matches
for s_index in range(0, len(S)):
count = count + 1
if S.loc[s_index, l_on] in T_map:
ret = ret.append(S.loc[s_index].append(T_map[S.loc[s_index, l_on]].drop(labels=r_on)), ignore_index=True)
print('Merge compared %d tuples'%count)
return ret
%%time
# Here's a test join, with people and their experiences. We can see how many
# comparisons are made
merge_map(experience_df, people_df, 'person', '_id')
%%time
# An exercise: how can you modify merge_map to make this work? (This can be skipped if you wish.)
merge_map(people_df, experience_df, '_id', 'person')
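# One possible solution (a sketch, not from the original notebook): store a
# *list* of matching rows per key, turning the map into a multimap so that
# duplicate join keys on the T side are allowed.
def merge_multimap(S, T, l_on, r_on):
    T_map = {}
    for t_index in range(0, len(T)):
        # Append to a per-key list instead of asserting uniqueness
        T_map.setdefault(T.loc[t_index, r_on], []).append(T.loc[t_index])
    rows = []
    for s_index in range(0, len(S)):
        for t_row in T_map.get(S.loc[s_index, l_on], []):
            rows.append(pd.concat([S.loc[s_index], t_row.drop(labels=r_on)]))
    return pd.DataFrame(rows).reset_index(drop=True)
# merge_multimap(people_df, experience_df, '_id', 'person')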
###Output
_____no_output_____ |
SciPy2016/notebooks/PF/.ipynb_checkpoints/Magnetics_PosterFigures-checkpoint.ipynb | ###Markdown
**Magnetostatic**In the absence of free-currents or changing magnetic field, magnetic material can give rise to a secondary magnetic field according to:$$\mathbf{b} = \frac{\mu_0}{4\pi} \int_{V} \mathbf{M} \cdot \nabla \nabla \left(\frac{1}{r}\right) \; dV $$Where $\mu_0$ is the magnetic permeability of free-space, $\mathbf{M}$ is the magnetization per unit volume and $r$ defines the distance between the observed field $\mathbf{b}$ and the magnetized object. Assuming a purely induced response, the strength of magnetization can be written as:$$ \mathbf{M} = \mu_0 \kappa \mathbf{H}_0 $$where $\kappa$ is the magnetic susceptibility, a unitless quantity describing the ability of matter to become magnetized in the direction of the Earth's field $\mathbf{H}_0$As derived by Sharma 1966, the integral can be evaluated for rectangular prisms such that:$$ \mathbf{b} = \mathbf{T} \cdot \mathbf{H}_0 \; \kappa $$Where the tensor matrix $\bf{T}$ relates the three components of magnetization $\mathbf{M}$ to the components of the field $\mathbf{b}$.This is a simple linear system we can invert.**Gravity**The relation between density and the gravity field is well known, thanks to the classic work of Newton in 1686. Since we generally only measure the vertical component of the field, this relationship can be written as:$$G(r)_z = \gamma \int_{V} \rho(r) \left(\frac{z - z_0}{{|\vec r - \vec r_0|}^3}\right) \; dV $$where $\rho$ is the anomalous density and $\gamma$ is Newton's gravitational constant.Once again, this integral can be evaluated analytically for simple prisms, giving rise to a linear system of equations relating a discrete Earth to the observed data:$$ \mathbf{d}_z = \mathbf{G}_z \; \rho $$
###Code
%pylab inline
import SimPEG.PF as PF
from SimPEG import Utils, Mesh, Maps
from SimPEG.Utils import io_utils
import matplotlib
matplotlib.rcParams['font.size'] = 14
import matplotlib.patches as patches
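# Aside (not in the original notebook): both forward problems described above
# reduce to a linear system d = G*m, which can be inverted in the regularized
# least-squares sense. A minimal sketch, assuming G (sensitivity matrix) and
# d (data vector) are NumPy arrays and beta is a user-chosen Tikhonov
# trade-off parameter:
def tikhonov_inverse(G, d, beta=1e-2):
    """Solve (G^T G + beta I) m = G^T d for the model m."""
    return np.linalg.solve(G.T.dot(G) + beta * np.eye(G.shape[1]), G.T.dot(d))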
###Output
_____no_output_____
###Markdown
**Analytical Field**We can first look at what the fields look like for simple susceptible and density anomalies.
###Code
# Plot a dipole field for sketch
xmin, xmax = -5., 5.
zmin, zmax = -5., 5.
nc = 11
R = 1.
x0, y0, z0 = 0.5, 0.5, 0.5
chi = 1
G = 1.
Ho = np.asarray([[1,0,1],np.r_[1,0,1]])
# Compute MAG fields
x, y, z = np.meshgrid(np.linspace(xmin, xmax, nc), np.zeros(1), np.linspace(zmin, zmax, nc))
Bx1, By1, Bz1 = PF.MagAnalytics.MagSphereFreeSpace(x, y, z, R, x0, y0, z0, chi, Ho)
Bx2, By2, Bz2 = PF.MagAnalytics.MagSphereFreeSpace(x, y, z, R/2., x0+2., y0, z0+0.5, chi*2., Ho)
Bx = (Bx1+Bx2).reshape((nc,nc))
Bz = (Bz1+Bz2).reshape((nc,nc))
lBl = np.sqrt(Bx**2. + Bz**2.)
# Compute Gravity field
Gx1, Gz1 = G*(np.pi*R**2.)*np.r_[(x[:]-x0),(z[:]-z0)]/((x-x0+1e-1)**2.+(z-z0+1e-1)**2.)
Gx2, Gz2 = G*(2*np.pi*(R/2.)**2.)*np.r_[(x[:]-(x0+2.)),(z[:]-(z0+0.5))]/((x-x0+1e-1)**2.+(z-z0+1e-1)**2.)
Gx = (Gx1+Gx2).reshape((nc,nc))
Gz = (Gz1+Gz2).reshape((nc,nc))
lGl = np.sqrt(Gx**2. + Gz**2.)
# Plot vector field
fig = plt.figure(figsize = (8,4))
ax0 = plt.subplot(1,2,1)
lw = 5.*(lBl / lBl.max())
stp = streamplot(z[0,:,:], x[0,:,:], Bz, Bx,color='k', linewidth=lw, density=0.5,arrowsize=2)
# circle1= plt.Circle((x0,z0),R,color='b',fill=True, lw=3)
# ax0.add_artist(circle1)
# circle1= plt.Circle((x0+2.,z0+0.5),R/2.,color='r',fill=True, lw=3)
# ax0.add_artist(circle1)
ax0.add_patch(patches.Rectangle((xmin, zmin),10., 10./1.5, alpha=0.1,color='grey' ))
plt.gca().set_aspect('equal', adjustable='box')
plt.xlim([xmin,xmax])
plt.ylim([zmin/2.,zmax])
plt.axis('off')
plt.tight_layout()
plt.rc('text', usetex=True)
plt.title(r'$\vec B$')
ax0 = plt.subplot(1,2,2)
lw = 5.*(lGl / lGl.max())
stp = streamplot(z[0,:,:], x[0,:,:], Gz, Gx,color='k', linewidth=lw, density=0.5,arrowsize=2)
# circle1= plt.Circle((x0,z0),R,color='b',fill=True, lw=3)
# ax0.add_artist(circle1)
# circle1= plt.Circle((x0+2.,z0+0.5),R/2.,color='r',fill=True, lw=3)
# ax0.add_artist(circle1)
ax0.add_patch(patches.Rectangle((xmin, zmin),10., 10./1.5, alpha=0.1,color='grey' ))
plt.gca().set_aspect('equal', adjustable='box')
plt.xlim([xmin,xmax])
plt.ylim([zmin/2.,zmax])
plt.title(r'$\vec G$')
plt.axis('off')
fig.savefig('PF_Sketch.png',dpi = 150)
###Output
_____no_output_____
###Markdown
**Plots for TKC**If the model files are not already in the directory, you need to run the inversion notebook "Magnetic over TKC.ipynb"
###Code
import os
model_dir = "Models\\"
# Load the mesh, model and data
mesh = Mesh.TensorMesh.readUBC(model_dir+"PF_mesh_UTM.msh")
# Load models
m_lp = Mesh.TensorMesh.readModelUBC(mesh,model_dir+"SimPEG_MAG_lplq.sus")
m_l2 = Mesh.TensorMesh.readModelUBC(mesh,model_dir+"SimPEG_MAG_l2l2.sus")
m_true = Mesh.TensorMesh.readModelUBC(mesh,model_dir+"Synthetic_mag.sus")
# m_lp = Mesh.TensorMesh.readModelUBC(mesh,"SimPEG_GRAV_lplq.den")
# m_l2 = Mesh.TensorMesh.readModelUBC(mesh,"SimPEG_GRAV_l2l2.den")
# m_true = Mesh.TensorMesh.readModelUBC(mesh,"Synthetic_Grav.den")
mesh.writeVTK('MAG_model.vtr',{'Sus':m_true})
airc = m_true == -1
m_lp[airc] = np.nan
m_l2[airc] = np.nan
m_true[airc] = np.nan
# Load data
temp = PF.MagneticsDriver.MagneticsDriver_Inv()
temp.basePath = os.getcwd() + os.path.sep
survey = temp.readMagneticsObservations(model_dir+"MAG_Synthetic_data.obs")
# temp = PF.GravityDriver.GravityDriver_Inv()
# temp.basePath = os.getcwd() + os.path.sep
# survey = temp.readGravityObservations(model_dir+"GRAV_Synthetic_data.obs")
# survey.srcField.rxList[0].locs[:,0] = survey.srcField.rxList[0].locs[:,0] - 557300.
# survey.srcField.rxList[0].locs[:,1] = survey.srcField.rxList[0].locs[:,1] - 7133600.
print(mesh.x0[1] + 557300)
fig = plt.figure(figsize=(11, 8))
vmin, vmax = 0., 0.015
xmin, xmax = -500 + 557300, 500 + 557300
ymin, ymax = -500 + 7133600, 500 + 7133600
zmin, zmax = -500 + 425, 0 + 425
indz = 46
indx = 38
# Axis label
x = np.linspace(xmin+200, xmax-200,3)
ax1 = plt.subplot(1,1,1)
pos = ax1.get_position()
ax1.set_position([pos.x0-0.1, pos.y0+0.3, pos.width*0.5, pos.height*0.5])
dat = mesh.plotSlice(m_l2, ax = ax1, normal='Z', ind=indz, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
plt.title('Smooth')
ax1.xaxis.set_visible(False)
xlim(xmin, xmax)
ylim(ymin, ymax)
ylabel('Northing (m)')
# ax2 = plt.subplot(2,2,3)
pos = ax1.get_position()
ax2 = fig.add_axes([pos.x0+0.055, pos.y0 - 0.3, pos.width*0.725, pos.height])
# ax2.yaxis.set_visible(False)
# ax2.set_position([pos.x0 -0.04 , pos.y0, pos.width, pos.height])
dat = mesh.plotSlice(m_l2, ax = ax2, normal='Y', ind=indx, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
plt.title('')
xlim(xmin, xmax)
ylim(zmin, zmax)
ax2.set_xticks(list(map(int, x)))
ax2.set_xticklabels(list(map(str, map(int, x))),size=12)
xlabel('Easting (m)')
ylabel('Elev. (m)')
## Add compact model
ax3 = fig.add_axes([pos.x0+0.3, pos.y0, pos.width, pos.height])
dat = mesh.plotSlice(m_lp, ax = ax3, normal='Z', ind=indz, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
plt.title('Compact')
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
xlim(xmin, xmax)
ylim(ymin, ymax)
ax4 = fig.add_axes([pos.x0+0.355, pos.y0 - 0.3, pos.width*0.725, pos.height])
# ax2.yaxis.set_visible(False)
# ax2.set_position([pos.x0 -0.04 , pos.y0, pos.width, pos.height])
dat = mesh.plotSlice(m_lp, ax = ax4, normal='Y', ind=indx, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
ax4.yaxis.set_visible(False)
plt.title('')
xlim(xmin, xmax)
ylim(zmin, zmax)
ax4.set_xticks(list(map(int, x)))
ax4.set_xticklabels(list(map(str, map(int, x))),size=12)
xlabel('Easting (m)')
ylabel('Elev. (m)')
## Add True model
ax5 = fig.add_axes([pos.x0+0.6, pos.y0, pos.width, pos.height])
dat = mesh.plotSlice(m_true, ax = ax5, normal='Z', ind=indz, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
plt.title('True model')
ax5.xaxis.set_visible(False)
ax5.yaxis.set_visible(False)
xlim(xmin, xmax)
ylim(ymin, ymax)
ax6 = fig.add_axes([pos.x0+0.655, pos.y0 - 0.3, pos.width*0.725, pos.height])
# ax2.yaxis.set_visible(False)
# ax2.set_position([pos.x0 -0.04 , pos.y0, pos.width, pos.height])
dat = mesh.plotSlice(m_true, ax = ax6, normal='Y', ind=indx, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
ax6.yaxis.set_visible(False)
plt.title('')
xlim(xmin, xmax)
ylim(zmin, zmax)
ax6.set_xticks(list(map(int, x)))
ax6.set_xticklabels(list(map(str, map(int, x))),size=12)
xlabel('Easting (m)')
ylabel('Elev. (m)')
pos = ax4.get_position()
cbarax = fig.add_axes([pos.x0 , pos.y0-0.025 , pos.width, pos.height*0.1]) ## the parameters are the specified position you set
cb = fig.colorbar(dat[0],cax=cbarax, orientation="horizontal", ax = ax4, ticks=np.linspace(vmin,vmax, 4))
# cb.set_label("Susceptibility (SI)",size=12)
# fig.savefig('MAG_RecModel.png',dpi = 200)
cb.set_label("Susceptibility (SI)",size=12)
fig.savefig('MAG_RecModel.png',dpi = 200)
# Plot some fields
fig = plt.figure(figsize=(8,7))
fig = PF.Magnetics.plot_obs_2D(survey.srcField.rxList[0].locs,survey.dobs, fig=fig)
title('Magnetic Data (nT)')
xlabel('Easting (m)')
ylabel('Northing (m)')
fig.savefig('MAG_Data.png',dpi = 200)
# fig = PF.Magnetics.plot_obs_2D(survey.srcField.rxList[0].locs,survey.dobs, fig=fig)
# title('Gravity Anomaly (mGal)')
# xlabel('Easting (m)')
# ylabel('Northing (m)')
# fig.savefig('GRAV_Data.png',dpi = 200)
# Run simulation to get fields through the pipe
# We create a synthetic survey with observations in cell center.
def genFields_Plane(xlim,ylim,zplane,normal='Z',surveyType = 'MAG'):
if normal=='Z':
x, y = np.linspace(xlim[0],xlim[1],11), np.linspace(ylim[0],ylim[1],11)
X, Y = np.meshgrid(x, y)
Z = np.ones(X.shape)*zplane
elif normal == 'X':
x, y = np.linspace(xlim[0],xlim[1],11), np.linspace(ylim[0],ylim[1],11)
Y, Z = np.meshgrid(x, y)
X = np.ones(Y.shape)*zplane
else:
x, y = np.linspace(xlim[0],xlim[1],11), np.linspace(ylim[0],ylim[1],11)
X, Z = np.meshgrid(x, y)
Y = np.ones(X.shape)*zplane
rxLoc = np.c_[Utils.mkvc(X.T), Utils.mkvc(Y.T), Utils.mkvc(Z.T)]
if surveyType == "MAG":
rxLoc = PF.BaseMag.RxObs(rxLoc)
srcField = PF.BaseMag.SrcField([rxLoc])
srcField.param = survey.srcField.param
section = PF.BaseMag.LinearSurvey(srcField)
m = m_true
m[airc] = 0.
actv = m > 1e-4
m = m[actv]
# Create reduced identity map
idenMap = Maps.IdentityMap(nP = int(np.sum(actv)))
# Create the forward model operator
prob = PF.Magnetics.Problem3D_Integral(mesh, forwardOnly=True, rtype = 'xyz', actInd = actv, mapping = idenMap)
elif surveyType == "GRAV":
rxLoc = PF.BaseGrav.RxObs(rxLoc)
srcField = PF.BaseGrav.SrcField([rxLoc])
section = PF.BaseGrav.LinearSurvey(srcField)
m = m_true
m[airc] = 0.
actv = m > 1e-4
m = m[actv]
# Create reduced identity map
idenMap = Maps.IdentityMap(nP = int(np.sum(actv)))
# Create the forward model operator
prob = PF.Gravity.GravityIntegral(mesh, forwardOnly=True, rtype = 'z', actInd = actv, mapping = idenMap)
# Pair the survey and problem
section.pair(prob)
# Compute fields
d = prob.fields(m)
return d, x, y
vmin, vmax = -0.05, 0.015
fig = plt.figure(figsize=(6,9))
# Reshape the fields and plot
fld, x ,y = genFields_Plane((-500,500),(-500,500),mesh.vectorCCz[indz],normal='Z', surveyType="MAG")
ndata = len(x)*len(y)
fld_x = fld[:ndata].reshape((len(y),len(x)))
fld_y = fld[ndata:2*ndata].reshape((len(y),len(x)))
fld_z = -fld[2*ndata:].reshape((len(y),len(x)))
fld_B = np.sqrt(fld_x**2 + fld_y**2+ fld_z**2)
padx = 4
m_true[airc] = np.nan
ax1 = plt.subplot(1,1,1)
pos = ax1.get_position()
ax1.set_position([pos.x0, pos.y0+.2, pos.width, pos.height])
dat = mesh.plotSlice(m_true, ax = ax1, normal='Z', ind=indz, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
plt.title('Bxy-field')
strp = ax1.streamplot(x, y, fld_x, fld_y,color='k',density=1, linewidth = 2., arrowsize = 5)
ax1.xaxis.set_visible(False)
xlim(-500, 500)
ylim(-500, 500)
# Reshape the fields and plot
fld, x ,y = genFields_Plane((-500,500),(-500,200),mesh.vectorCCy[indx],normal='Y')
ndata = len(x)*len(y)
fld_x = fld[:ndata].reshape((len(y),len(x)))
fld_y = fld[ndata:2*ndata].reshape((len(y),len(x)))
fld_z = -fld[2*ndata:].reshape((len(y),len(x)))
fld_B = np.sqrt(fld_x**2 + fld_y**2+ fld_z**2)
padx = 4
m_true[airc] = np.nan
pos = ax1.get_position()
ax2 = fig.add_axes([pos.x0, pos.y0 - 0.475, pos.width, pos.height])
dat = mesh.plotSlice(m_true, ax = ax2, normal='Y', ind=indx, clim=np.r_[vmin, vmax],pcolorOpts={'cmap':'viridis'})
# plt.colorbar(dat[0])
plt.gca().set_aspect('equal')
plt.title('Bxz-field')
strp = ax2.streamplot(x, y, fld_x, fld_z,color='k',density=1, linewidth = 2., arrowsize = 5.)
# ax2.xaxis.set_visible(False)
xlim(-500, 500)
ylim(-500, 200)
fig.savefig('MAG_VectorField.png',dpi = 200)
print(ndata)
print(survey.srcField.param)
###Output
_____no_output_____ |
Toy-Example-Solution.ipynb | ###Markdown
Toy Example: Ridge Regression vs. SVM In this toy example we will compare two machine learning models: Ridge Regression and C-SVM. The data is generated in silico and is only used to illustrate how to use Ridge Regression and C-SVM. Problem Description of the Toy Example A new cancer drug was developed for therapy. During the clinical trial the researchers realized that the drug had a faster response in a certain subgroup of the patients, while it was less responsive in the others. In addition, the researchers recognized that the drug leads to severe side effects the longer the patient is treated with it. The goal is to reduce the side effects by treating only those patients that are predicted to have a fast response when taking the drug.The researchers believe that different genetic mutations in the genomes of the individual patients might play a role in the differences in response times. The researchers contacted the machine learning lab to build a predictive model. The model should predict the individual response time of the drug based on the genetic background of each patient.For this purpose, we get a dataset of 400 patients. For each patient a panel of 600 genetic mutations was measured. In addition, the researchers measured how many days it took until the drug showed a positive response. 1. Using Ridge Regression to predict the response time To predict the response time of the drug for new patients, we will train a Ridge Regression model. The target variable for this task is the response time in days. The features are the 600 genetic mutations measured for each of the 400 patients. To avoid overfitting we will use a nested cross-validation to determine the optimal hyperparameter. 1.1 Data Preprocessing
###Code
%matplotlib inline
import scipy as sp
import matplotlib
import pylab as pl
matplotlib.rcParams.update({'font.size': 15})
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
from sklearn.model_selection import KFold, StratifiedKFold, GridSearchCV,StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error, mean_absolute_error
from sklearn.metrics import roc_curve, auc
def visualized_variance_bias_tradeoff(hyperp, line_search, optimal_hyperp,classification=False):
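# Plots mean training vs. validation scores (with +-1 std bands) across the
# hyperparameter grid to visualize the bias-variance tradeoff; GridSearchCV
# reports negative MSE for regression, so factor=-1 flips the sign back
# (classification accuracy is left as-is via factor=1).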
pl.figure(figsize=(18,7))
if classification:
factor=1
else:
factor=-1
pl.plot(hyperp,line_search.cv_results_['mean_train_score']*factor,label="Training Error",color="#e67e22")
pl.fill_between(hyperp,
line_search.cv_results_['mean_train_score']*factor-line_search.cv_results_['std_train_score'],
line_search.cv_results_['mean_train_score']*factor+line_search.cv_results_['std_train_score'],
alpha=0.3,color="#e67e22")
pl.plot(hyperp,line_search.cv_results_['mean_test_score']*factor,label="Validation Error",color="#2980b9")
pl.fill_between(hyperp,
line_search.cv_results_['mean_test_score']*factor-line_search.cv_results_['std_test_score'],
line_search.cv_results_['mean_test_score']*factor+line_search.cv_results_['std_test_score'],
alpha=0.3,color="#2980b9")
pl.xscale("log")
if classification:
pl.ylabel("Accuracy")
else:
pl.ylabel("Mean Squared Error")
pl.xlabel("Hyperparameter")
pl.legend(frameon=True)
pl.grid(True)
pl.axvline(x=optimal_hyperp,color='r',linestyle="--")
pl.title("Training- vs. Validation-Error (Optimal Hyperparameter = %.1e)"%optimal_hyperp);
random_state = 42
#Load Data
data = sp.loadtxt("data/X.txt")
binary_target = sp.loadtxt("data/y_binary.txt")
continuous_target = sp.loadtxt("data/y.txt")
#Summary of the Data
print("Orginal Data")
print("Number Patients:\t%d"%data.shape[0])
print("Number Features:\t%d"%data.shape[1])
print()
#Split Data into Training and Testing data
train_test_data = train_test_split(data,
continuous_target,
test_size=0.2,
random_state=random_state)
training_data = train_test_data[0]
testing_data = train_test_data[1]
training_target = train_test_data[2]
testing_target = train_test_data[3]
print("Training Data")
print("Number Patients:\t%d"%training_data.shape[0])
print("Number Features:\t%d"%training_data.shape[1])
print()
print("Testing Data")
print("Number Patients:\t%d"%testing_data.shape[0])
print("Number Features:\t%d"%testing_data.shape[1])
###Output
Original Data
Number Patients: 400
Number Features: 600
Training Data
Number Patients: 320
Number Features: 600
Testing Data
Number Patients: 80
Number Features: 600
###Markdown
1.2 Train Ridge Regression on training data The first step is to train the ridge regression model on the training data, using **5-fold cross-validation** with an **internal line-search** to find the **optimal hyperparameter $\alpha$**. We will plot the **training errors** against the **validation errors** to illustrate the effect of different $\alpha$ values.
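As a brief aside (added here for reference), ridge regression fits a weight vector $w$ by minimizing the regularized least-squares objective $\lVert y - Xw \rVert_2^2 + \alpha \lVert w \rVert_2^2$; the hyperparameter $\alpha$ therefore controls how strongly the 600 mutation weights are shrunk towards zero, and it is exactly this $\alpha$ that the line-search below tunes.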
###Code
#Initialize different alpha values for the Ridge Regression model
alphas = sp.logspace(-2,8,11)
param_grid = dict(alpha=alphas)
#5-fold cross-validation (outer-loop)
outer_cv = KFold(n_splits=5,shuffle=True,random_state=random_state)
#Line-search to find the optimal alpha value (internal-loop)
#Model performance is measured with the negative mean squared error
line_search = GridSearchCV(Ridge(random_state=random_state,solver="cholesky"),
param_grid=param_grid,
scoring="neg_mean_squared_error",
return_train_score=True)
#Execute nested cross-validation and compute mean squared error
score = cross_val_score(line_search,X=training_data,y=training_target,cv=outer_cv,scoring="neg_mean_squared_error")
print("5-fold nested cross-validation")
print("Mean-Squared-Error:\t\t%.2f (-+ %.2f)"%(score.mean()*(-1),score.std()))
print()
#Estimate optimal alpha on the full training data
line_search.fit(training_data,training_target)
optimal_alpha = line_search.best_params_['alpha']
#Visualize training and validation error for different alphas
visualized_variance_bias_tradeoff(alphas, line_search, optimal_alpha)
###Output
5-fold nested cross-validation
Mean-Squared-Error: 587.09 (-+ 53.45)
###Markdown
1.3 Train Ridge Regression with optimal $\alpha$ and evaluate model on test data Next we retrain the ridge regression model with the optimal $\alpha$ (from the last section). After re-training, we will evaluate the model on the held-out test data to assess its performance on unseen data.
###Code
#Train Ridge Regression on the full training data with optimal alpha
model = Ridge(alpha=optimal_alpha,solver="cholesky")
model.fit(training_data,training_target)
#Use trained model the predict new instances in test data
predictions = model.predict(testing_data)
print("Prediction results on test data")
print("MSE (test data, alpha=optimal):\t%.2f "%(mean_squared_error(testing_target,predictions)))
print("Optimal Alpha:\t\t\t%.2f"%optimal_alpha)
print()
###Output
Prediction results on test data
MSE (test data, alpha=optimal): 699.56
Optimal Alpha: 100000.00
###Markdown
Using 5-fold cross-validation on the training data leads to a mean squared error (MSE) of $MSE=587.09 \pm 53.45$. On the test data we get an error of $MSE=699.56$ ($\sim 26.5$ days). That indicates that the ridge regression model delivers rather mediocre results (even with hyperparameter optimization). One reason might be that the target variable (the number of days until the drug shows a positive response) is insufficiently described by the given features (genetic mutations). 2. Prediction of patients with slow and fast response times using a Support-Vector-Machine Due to the rather disappointing results with the ridge regression model, the machine learning lab returned to the researchers to discuss potential issues. The researchers then mentioned that it might not necessarily be important to predict the exact number of days; it might be even better to only predict whether a patient reacts quickly or slowly to the drug. Based on some prior experiments, the researchers observed that most of the patients showed severe side-effects after 50 days of treatment. Thus we can binarise the data, such that all patients below 50 days are put into class 0 and all others into class 1 (a minimal sketch of this binarisation is shown below). This leads to a classical classification problem for which a support vector machine could be used. 2.1 Data Preprocessing
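As a minimal sketch (not part of the original notebook, which loads the precomputed `data/y_binary.txt`), the binary target could be derived from the continuous response times with the 50-day cutoff described above:
###Code
#Hypothetical sketch: binarise the response times at the 50-day cutoff
#(class 0 = fast responders, < 50 days; class 1 = slow responders, >= 50 days)
binary_target_demo = (continuous_target >= 50).astype(int)
print("Class 0 (fast):\t%d"%(binary_target_demo==0).sum())
print("Class 1 (slow):\t%d"%(binary_target_demo==1).sum())
###Output
_____no_output_____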
###Code
#Split data into training and testing splits, stratified by class-ratios
stratified_splitter = StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index,test_index in stratified_splitter.split(data,binary_target):
training_data = data[train_index,:]
training_target = binary_target[train_index]
testing_data = data[test_index,:]
testing_target = binary_target[test_index]
print("Training Data")
print("Number Patients:\t\t%d"%training_data.shape[0])
print("Number Features:\t\t%d"%training_data.shape[1])
print("Number Patients Class 0:\t%d"%(training_target==0).sum())
print("Number Patients Class 1:\t%d"%(training_target==1).sum())
print()
print("Testing Data")
print("Number Patients:\t\t%d"%testing_data.shape[0])
print("Number Features:\t\t%d"%testing_data.shape[1])
print("Number Patients Class 0:\t%d"%(testing_target==0).sum())
print("Number Patients Class 1:\t%d"%(testing_target==1).sum())
###Output
Training Data
Number Patients: 320
Number Features: 600
Number Patients Class 0: 160
Number Patients Class 1: 160
Testing Data
Number Patients: 80
Number Features: 600
Number Patients Class 0: 40
Number Patients Class 1: 40
###Markdown
2.2 Classification with a linear SVM
###Code
Cs = sp.logspace(-7, 1, 9)
param_grid = dict(C=Cs)
grid = GridSearchCV(SVC(kernel="linear",random_state=random_state),
param_grid=param_grid,
scoring="accuracy",
n_jobs=4,
return_train_score=True)
outer_cv = StratifiedKFold(n_splits=5,shuffle=True,random_state=random_state)
#Perform 5 Fold cross-validation with internal line-search and report average Accuracy
score = cross_val_score(grid,X=training_data,y=training_target,cv=outer_cv,scoring="accuracy")
print("5-fold nested cross-validation on training data")
print("Average(Accuracy):\t\t\t%.2f (-+ %.2f)"%(score.mean(),score.std()))
print()
grid.fit(training_data,training_target)
optimal_C = grid.best_params_['C']
#Plot variance bias tradeoff
visualized_variance_bias_tradeoff(Cs, grid, optimal_C,classification=True)
#retrain model with optimal C and evaluate on test data
model = SVC(C=optimal_C,random_state=random_state,kernel="linear")
model.fit(training_data,training_target)
predictions = model.predict(testing_data)
print("Prediction with optimal C")
print("Accuracy (Test data, C=Optimal):\t%.2f "%(accuracy_score(testing_target,predictions)))
print("Optimal C:\t\t\t\t%.2e"%optimal_C)
print()
#Compute ROC FPR, TPR and AUC
fpr, tpr, _ = roc_curve(testing_target, model.decision_function(testing_data))
roc_auc = auc(fpr, tpr)
#Plot ROC Curve
pl.figure(figsize=(8,8))
pl.plot(fpr, tpr, color='darkorange',
lw=3, label='ROC curve (AUC = %0.2f)' % roc_auc)
pl.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
pl.xlim([-0.01, 1.0])
pl.ylim([0.0, 1.05])
pl.xlabel('False Positive Rate (1-Specificity)',fontsize=18)
pl.ylabel('True Positive Rate (Sensitivity)',fontsize=18)
pl.title('Receiver Operating Characteristic (ROC) Curve',fontsize=18)
pl.legend(loc="lower right",fontsize=18)
###Output
5-fold nested cross-validation on training data
Average(Accuracy): 0.80 (-+ 0.02)
Prediction with optimal C
Accuracy (Test data, C=Optimal): 0.82
Optimal C: 1.00e-04
###Markdown
2.3 Classification with SVM and RBF kernel
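As a brief aside (added here for reference), the RBF kernel is $k(x, x') = \exp(-\gamma \lVert x - x' \rVert^2)$; the additional hyperparameter $\gamma$ controls the kernel width, which is why the grid search below runs over both $C$ and $\gamma$.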
###Code
Cs = sp.logspace(-4, 4, 9)
gammas = sp.logspace(-7, 1, 9)
param_grid = dict(C=Cs,gamma=gammas)
grid = GridSearchCV(SVC(kernel="rbf",random_state=42),
param_grid=param_grid,
scoring="accuracy",
n_jobs=4,
return_train_score=True)
outer_cv = StratifiedKFold(n_splits=5,shuffle=True,random_state=random_state)
#Perform 5 Fold cross-validation with internal line-search and report average Accuracy
score = cross_val_score(grid,X=training_data,y=training_target,cv=outer_cv,scoring="accuracy")
print("5-fold nested cross-validation on training data")
print("Average(Accuracy):\t\t\t%.2f (-+ %.2f)"%(score.mean(),score.std()))
print()
grid.fit(training_data,training_target)
optimal_C = grid.best_params_['C']
optimal_gamma = grid.best_params_['gamma']
#Retrain and test
model = SVC(C=optimal_C,gamma=optimal_gamma,random_state=42,kernel="rbf")
model.fit(training_data,training_target)
predictions = model.predict(testing_data)
print("Prediction with optimal C and Gamma")
print("Accuracy (Test Data, C=Optimal):\t%.2f "%(accuracy_score(testing_target,predictions)))
print("Optimal C:\t\t\t\t%.2e"%optimal_C)
print("Optimal Gamma:\t\t\t\t%.2e"%optimal_gamma)
print()
#Compute ROC FPR, TPR and AUC
fpr, tpr, _ = roc_curve(testing_target, model.decision_function(testing_data))
roc_auc = auc(fpr, tpr)
#Plot ROC Curve
pl.figure(figsize=(8,8))
pl.plot(fpr, tpr, color='darkorange',
lw=3, label='ROC curve (AUC = %0.2f)' % roc_auc)
pl.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
pl.xlim([-0.01, 1.0])
pl.ylim([0.0, 1.05])
pl.xlabel('False Positive Rate (1-Specificity)',fontsize=18)
pl.ylabel('True Positive Rate (Sensitivity)',fontsize=18)
pl.title('Receiver Operating Characteristic (ROC) Curve',fontsize=18)
pl.legend(loc="lower right",fontsize=18)
###Output
5-fold nested cross-validation on training data
Average(Accuracy): 0.86 (-+ 0.02)
Prediction with optimal C and Gamma
Accuracy (Test Data, C=Optimal): 0.93
Optimal C: 1.00e+01
Optimal Gamma: 1.00e-05
|
computer-vision-pytorch/6-transfer-learning.ipynb | ###Markdown
Pre-trained models and transfer learning Training CNNs can take a lot of time, and a lot of data is required for that task. However, much of the time is spent learning the best low-level filters that a network uses to extract patterns from images. A natural question arises - can we use a neural network trained on one dataset and adapt it to classify different images without the full training process? This approach is called **transfer learning**, because we transfer some knowledge from one neural network model to another. In transfer learning, we typically start with a pre-trained model, which has been trained on some large image dataset, such as **ImageNet**. Those models can already do a good job extracting different features from generic images, and in many cases just building a classifier on top of those extracted features can yield a good result.
###Code
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torchinfo import summary
import numpy as np
import os
from pytorchcv import train, plot_results, display_dataset, train_long, check_image_dir
###Output
_____no_output_____
###Markdown
Cats vs. Dogs Dataset In this unit, we will solve a real-life problem of classifying images of cats and dogs. For this reason, we will use [Kaggle Cats vs. Dogs Dataset](https://www.kaggle.com/c/dogs-vs-cats), which can also be downloaded [from Microsoft](https://www.microsoft.com/en-us/download/details.aspx?id=54765). Let's download this dataset and extract it into the `data` directory (this process may take some time!):
###Code
if not os.path.exists('data/kagglecatsanddogs_3367a.zip'):
!wget -P data -q https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip
import zipfile
if not os.path.exists('data/PetImages'):
with zipfile.ZipFile('data/kagglecatsanddogs_3367a.zip', 'r') as zip_ref:
zip_ref.extractall('data')
###Output
_____no_output_____
###Markdown
Unfortunately, there are some corrupt image files in the dataset. We need to do a quick cleaning pass to check for corrupted files. In order not to clutter this tutorial, we moved the dataset-verification code into a module.
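As an illustration, a minimal sketch of what such a verification could look like is shown below; this PIL-based version is an assumption made here for clarity, and the actual `check_image_dir` helper in `pytorchcv` may differ:
###Code
#Hypothetical sketch of the image check (the real helper lives in pytorchcv);
#PIL's verify() raises an exception on truncated or otherwise corrupt files.
import glob
from PIL import Image
def check_image_dir_sketch(pattern):
    for fn in glob.glob(pattern):
        try:
            Image.open(fn).verify()
        except Exception:
            print("Corrupt image: " + fn)
###Output
_____no_output_____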
###Code
check_image_dir('data/PetImages/Cat/*.jpg')
check_image_dir('data/PetImages/Dog/*.jpg')
###Output
Corrupt image: data/PetImages/Cat/666.jpg
Corrupt image: data/PetImages/Dog/11702.jpg
###Markdown
Next, let's load the images into a PyTorch dataset, converting them to tensors and doing some normalization. We will apply the `std_normalize` transform to bring images into the range expected by the pre-trained VGG network:
###Code
std_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trans = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
std_normalize])
dataset = torchvision.datasets.ImageFolder('data/PetImages',transform=trans)
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
display_dataset(dataset)
###Output
_____no_output_____
###Markdown
Pre-trained models There are many different pre-trained models available inside the `torchvision` module, and even more models can be found on the Internet. Let's see how the simplest VGG-16 model can be loaded and used:
###Code
vgg = torchvision.models.vgg16(pretrained=True)
sample_image = dataset[0][0].unsqueeze(0)
res = vgg(sample_image)
print(res[0].argmax())
###Output
tensor(282)
###Markdown
The result that we have received is the number of an `ImageNet` class, which can be looked up [here](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). We can use the following code to automatically load this class table and return the result:
###Code
import json, requests
class_map = json.loads(requests.get("https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json").text)
class_map = { int(k) : v for k,v in class_map.items() }
class_map[res[0].argmax().item()]
###Output
_____no_output_____
###Markdown
Let's also see the architecture of the VGG-16 network:
###Code
summary(vgg,input_size=(1,3,224,224))
###Output
_____no_output_____
###Markdown
In addition to the layers we already know, there is another layer type called **Dropout**. These layers act as a **regularization** technique. Regularization makes slight modifications to the learning algorithm so the model generalizes better. During training, dropout layers discard some proportion (around 30%) of the neurons in the previous layer, and training happens without them. This helps get the optimization process out of local minima and distributes the decision power between different neural paths, which improves the overall stability of the network. GPU computations Deep neural networks, such as VGG-16 and other more modern architectures, require quite a lot of computational power to run. It makes sense to use GPU acceleration, if it is available. In order to do so, we need to explicitly move all tensors involved in the computation to the GPU. The way this is normally done is to check the availability of a GPU in the code, and define a `device` variable that points to the computational device - either GPU or CPU.
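Before moving on, here is a small illustrative sketch (not part of the original notebook) of the **Dropout** behaviour described above: the layer zeroes a random subset of activations in training mode and acts as the identity in evaluation mode.
###Code
#Illustrative sketch: Dropout in train vs. eval mode (p mirrors the ~30% above)
drop = nn.Dropout(p=0.3)
t = torch.ones(10)
drop.train()
print(drop(t))   # roughly 30% of entries zeroed, survivors scaled by 1/(1-p)
drop.eval()
print(drop(t))   # identity: all ones
###Output
_____no_output_____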
###Code
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Doing computations on device = {}'.format(device))
vgg.to(device)
sample_image = sample_image.to(device)
vgg(sample_image).argmax()
###Output
Doing computations on device = cuda
###Markdown
Extracting VGG features If we want to use VGG-16 to extract features from our images, we need the model without the final classification layers. In fact, this "feature extractor" can be obtained via the `vgg.features` submodule:
###Code
res = vgg.features(sample_image).cpu()
plt.figure(figsize=(15,3))
plt.imshow(res.detach().view(-1,512))
print(res.size())
###Output
torch.Size([1, 512, 7, 7])
###Markdown
The dimension of the feature tensor is 512x7x7, but in order to visualize it we had to reshape it into 2D form. Now let's see whether those features can be used to classify images. Let's manually take some portion of the images (800 in our case) and pre-compute their feature vectors. We will store the result in one big tensor called `feature_tensor`, and the labels in `label_tensor`:
###Code
bs = 8
dl = torch.utils.data.DataLoader(dataset,batch_size=bs,shuffle=True)
num = bs*100
feature_tensor = torch.zeros(num,512*7*7).to(device)
label_tensor = torch.zeros(num).to(device)
i = 0
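# Stream batches through the frozen VGG feature extractor: torch.no_grad()
# disables gradient tracking, and each 512x7x7 feature map is flattened into
# one row of feature_tensor until num images have been processed.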
for x,l in dl:
with torch.no_grad():
f = vgg.features(x.to(device))
feature_tensor[i:i+bs] = f.view(bs,-1)
label_tensor[i:i+bs] = l
i+=bs
print('.',end='')
if i>=num:
break
###Output
....................................................................................................
###Markdown
Now we can define a `vgg_dataset` that takes data from this tensor, split it into training and test sets using the `random_split` function, and train a small one-layer dense classifier network on top of the extracted features:
###Code
vgg_dataset = torch.utils.data.TensorDataset(feature_tensor,label_tensor.to(torch.long))
train_ds, test_ds = torch.utils.data.random_split(vgg_dataset,[700,100])
train_loader = torch.utils.data.DataLoader(train_ds,batch_size=32)
test_loader = torch.utils.data.DataLoader(test_ds,batch_size=32)
net = torch.nn.Sequential(torch.nn.Linear(512*7*7,2),torch.nn.LogSoftmax()).to(device)
history = train(net,train_loader,test_loader)
###Output
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/torch/nn/modules/container.py:100: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
input = module(input)
###Markdown
The result is great: we can distinguish between a cat and a dog with almost 98% accuracy! However, we have only tested this approach on a small subset of all images, because manual feature extraction takes a lot of time. Transfer learning using one VGG network We can also avoid manually pre-computing the features by using the original VGG-16 network as a whole during training. Let's look at the VGG-16 object structure:
###Code
print(vgg)
###Output
VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
###Markdown
You can see that the network contains: * a feature extractor (`features`), comprised of a number of convolutional and pooling layers * an average pooling layer (`avgpool`) * a final `classifier`, consisting of several dense layers, which turns 25088 input features into 1000 classes (the number of classes in ImageNet) To train the end-to-end model that will classify our dataset, we need to: * **replace the final classifier** with one that produces the required number of classes. In our case, we can use a single `Linear` layer with 25088 inputs and 2 output neurons. * **freeze the weights of the convolutional feature extractor**, so that they are not trained. It is recommended to do this freezing initially, because otherwise the untrained classifier layer can destroy the original pre-trained weights of the convolutional extractor. Freezing weights can be accomplished by setting the `requires_grad` property of all parameters to `False`
###Code
vgg.classifier = torch.nn.Linear(25088,2).to(device)
for x in vgg.features.parameters():
x.requires_grad = False
summary(vgg,(1, 3,244,244))
###Output
_____no_output_____
###Markdown
As you can see from the summary, this model contains around 15 million parameters in total, but only 50k of them are trainable - those are the weights of the classification layer. That is good, because we are able to fine-tune a smaller number of parameters with a smaller number of examples. Now let's train the model using our original dataset. This process will take a long time, so we will use the `train_long` function that prints some intermediate results without waiting for the end of the epoch. It is highly recommended to run this training on GPU-enabled compute!
###Code
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
train_loader = torch.utils.data.DataLoader(trainset,batch_size=16)
test_loader = torch.utils.data.DataLoader(testset,batch_size=16)
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90)
###Output
Epoch 0, minibatch 0: train acc = 0.5625, train loss = 0.045165419578552246
Epoch 0, minibatch 90: train acc = 0.945054945054945, train loss = 0.10654432694990557
Epoch 0, minibatch 180: train acc = 0.9558011049723757, train loss = 0.10983258452863325
Epoch 0, minibatch 270: train acc = 0.9612546125461254, train loss = 0.11056054034356262
Epoch 0, minibatch 360: train acc = 0.9657202216066482, train loss = 0.09974451672667611
Epoch 0, minibatch 450: train acc = 0.9667405764966741, train loss = 0.10326384652215996
Epoch 0, minibatch 540: train acc = 0.9695009242144177, train loss = 0.1081675954314565
###Markdown
It looks like we have obtained a reasonably accurate cats vs. dogs classifier! Let's save it for future use!
###Code
torch.save(vgg,'data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
We can then load the model from the file at any time. You may find this useful in case the next experiment destroys the model - you would not have to restart from scratch.
###Code
vgg = torch.load('data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
Fine-tuning transfer learning In the previous section, we trained the final classifier layer to classify images in our own dataset. However, we did not re-train the feature extractor, and our model relied on the features that the model had learned on ImageNet data. If your objects visually differ from ordinary ImageNet images, this combination of features might not work best. Thus it makes sense to start training the convolutional layers as well. To do that, we can unfreeze the convolutional filter parameters that we have previously frozen. > **Note:** It is important that you freeze parameters first and perform several epochs of training in order to stabilize the weights in the classification layer. If you immediately start training the end-to-end network with unfrozen parameters, large errors are likely to destroy the pre-trained weights in the convolutional layers.
###Code
for x in vgg.features.parameters():
x.requires_grad = True
###Output
_____no_output_____
###Markdown
After unfreezing, we can do a few more epochs of training. You can also select a lower learning rate, in order to minimize the impact on the pre-trained weights. However, even with a low learning rate, you can expect the accuracy to drop at the beginning of training, until it finally reaches a slightly higher level than in the fixed-weights case. > **Note:** This training happens much more slowly, because we need to propagate gradients back through many layers of the network! You may want to watch the first few minibatches to see the tendency, and then stop the computation.
###Code
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90,lr=0.0001)
###Output
Epoch 0, minibatch 0: train acc = 0.9375, train loss = 0.05164256691932678
Epoch 0, minibatch 90: train acc = 0.9237637362637363, train loss = 0.22198507288000086
Epoch 0, minibatch 180: train acc = 0.9350828729281768, train loss = 0.1316179834018096
Epoch 0, minibatch 270: train acc = 0.9446494464944649, train loss = 0.09719027980227311
Epoch 0, minibatch 360: train acc = 0.9510041551246537, train loss = 0.07776963083367598
Epoch 0, minibatch 450: train acc = 0.9542682926829268, train loss = 0.0652109091139157
Epoch 0, minibatch 540: train acc = 0.9554066543438078, train loss = 0.057144408305339144
Epoch 0, minibatch 630: train acc = 0.9562202852614897, train loss = 0.051035371710872496
Epoch 0, minibatch 720: train acc = 0.9535367545076283, train loss = 0.048412391778996186
Epoch 0, minibatch 810: train acc = 0.9558415536374846, train loss = 0.043707310197973664
Epoch 0, minibatch 900: train acc = 0.9573390677025527, train loss = 0.04010265490058789
Epoch 0, minibatch 990: train acc = 0.9588168516649849, train loss = 0.03708902353234777
Epoch 0, minibatch 1080: train acc = 0.9598751156336726, train loss = 0.034591798314774734
Epoch 0, minibatch 1170: train acc = 0.9600234842015372, train loss = 0.03259906394170147
Epoch 0 done, validation acc = 0.977390956382553, validation loss = 0.004988700878911134
###Markdown
Other computer vision models VGG-16 is one of the simplest computer vision architectures. The `torchvision` package provides many more pre-trained networks. The most frequently used ones among those are the **ResNet** architectures, developed by Microsoft, and **Inception** by Google. For example, let's explore the architecture of the simplest ResNet-18 model (ResNet is a family of models of different depths; you can try experimenting with ResNet-152 if you want to see what a really deep model looks like):
###Code
resnet = torchvision.models.resnet18()
print(resnet)
###Output
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer2): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer4): Sequential(
(0): BasicBlock(
(conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(fc): Linear(in_features=512, out_features=1000, bias=True)
)
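###Markdown
As a closing illustration (a minimal sketch, not part of the original notebook), the same transfer-learning recipe can be applied to ResNet-18: freeze the pre-trained backbone and replace the final `fc` layer (512 input features, as the printout above shows) with a two-class head; the `train_long` call mirrors the one used for VGG-16 earlier.
###Code
#Hypothetical sketch: transfer learning with ResNet-18 instead of VGG-16
resnet = torchvision.models.resnet18(pretrained=True)
for p in resnet.parameters():
    p.requires_grad = False              # freeze the pre-trained backbone
resnet.fc = torch.nn.Linear(512, 2)      # new trainable cats-vs-dogs head
resnet = resnet.to(device)
train_long(resnet,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90)
###Output
_____no_output_____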
###Markdown
Pre-trained models and transfer learningTraining CNNs can take a lot of time, and a lot of data is required for that task. However, much of the time is spent to learn the best low-level filters that a network is using to extract patterns from images. A natural question arises - can we use a neural network trained on one dataset and adapt it to classifying different images without full training process?This approach is called **transfer learning**, because we transfer some knowledge from one neural network model to another. In transfer learning, we typically start with a pre-trained model, which has been trained on some large image dataset, such as **ImageNet**. Those models can already do a good job extracting different features from generic images, and in many cases just building a classifier on top of those extracted features can yield a good result.
###Code
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torchinfo import summary
import numpy as np
import os
from pytorchcv import train, plot_results, display_dataset, train_long, check_image_dir
###Output
_____no_output_____
###Markdown
Cats vs. Dogs DatasetIn this unit, we will solve a real-life problem of classifying images of cats and dogs. For this reason, we will use [Kaggle Cats vs. Dogs Dataset](https://www.kaggle.com/c/dogs-vs-cats), which can also be downloaded [from Microsoft](https://www.microsoft.com/en-us/download/details.aspx?id=54765).Let's download this dataset and extract it into `data` directory (this process may take some time!):
###Code
if not os.path.exists('data/kagglecatsanddogs_3367a.zip'):
!wget -P data -q https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip
import zipfile
if not os.path.exists('data/PetImages'):
with zipfile.ZipFile('data/kagglecatsanddogs_3367a.zip', 'r') as zip_ref:
zip_ref.extractall('data')
###Output
_____no_output_____
###Markdown
Unfortunately, there are some corrupt image files in the dataset. We need to do quick cleaning to check for corrupted files. In order not to clobber this tutorial, we moved the code to verify dataset into a module.
###Code
check_image_dir('data/PetImages/Cat/*.jpg')
check_image_dir('data/PetImages/Dog/*.jpg')
###Output
Corrupt image: data/PetImages/Cat/666.jpg
###Markdown
Next, let's load the images into PyTorch dataset, converting them to tensors and doing some normalization. We will apply `std_normalize` transform to bring images to the range expected by pre-trained VGG network:
###Code
std_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trans = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
std_normalize])
dataset = torchvision.datasets.ImageFolder('data/PetImages',transform=trans)
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
display_dataset(dataset)
###Output
_____no_output_____
###Markdown
Pre-trained modelsThere are many different pre-trained models available inside `torchvision` module, and even more models can be found on the Internet. Let's see how simplest VGG-16 model can be loaded and used:
###Code
vgg = torchvision.models.vgg16(pretrained=True)
sample_image = dataset[0][0].unsqueeze(0)
res = vgg(sample_image)
print(res[0].argmax())
###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /home/cathy/.cache/torch/hub/checkpoints/vgg16-397923af.pth
###Markdown
The result that we have received is a number of an `ImageNet` class, which can be looked up [here](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). We can use the following code to automatically load this class table and return the result:
###Code
import json, requests
class_map = json.loads(requests.get("https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json").text)
class_map = { int(k) : v for k,v in class_map.items() }
class_map[res[0].argmax().item()]
###Output
_____no_output_____
###Markdown
Let's also see the architecture of the VGG-16 network:
###Code
summary(vgg,input_size=(1,3,224,224))
###Output
_____no_output_____
###Markdown
In addition to the layer we already know, there is also another layer type called **Dropout**. These layers act as **regularization** technique. Regularization makes slight modifications to the learning algorithm so the model generalizes better. During training, dropout layers discard some proportion (around 30%) of the neurons in the previous layer, and training happens without them. This helps to get the optimization process out of local minima, and to distribute decisive power between different neural paths, which improves overall stability of the network. GPU computationsDeep neural networks, such as VGG-16 and other more modern architectures require quite a lot of computational power to run. It makes sense to use GPU acceleration, if it is available. In order to do so, we need to explicitly move all tensors involved in the computation to GPU.The way it is normally done is to check the availability of GPU in the code, and define `device` variable that points to the computational device - either GPU or CPU.
###Code
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Doing computations on device = {}'.format(device))
vgg.to(device)
sample_image = sample_image.to(device)
vgg(sample_image).argmax()
###Output
Doing computations on device = cuda
###Markdown
Extracting VGG featuresIf we want to use VGG-16 to extract features from our images, we need the model without final classification layers. In fact, this "feature extractor" can be obtained using `vgg.features` method:
###Code
res = vgg.features(sample_image).cpu()
plt.figure(figsize=(15,3))
plt.imshow(res.detach().view(-1,512))
print(res.size())
###Output
torch.Size([1, 512, 7, 7])
###Markdown
The dimension of feature tensor is 512x7x7, but in order to visualize it we had to reshape it to 2D form.Now let's try to see if those features can be used to classify images. Let's manually take some portion of images (800 in our case), and pre-compute their feature vectors. We will store the result in one big tensor called `feature_tensor`, and also labels into `label_tensor`:
###Code
bs = 8
dl = torch.utils.data.DataLoader(dataset,batch_size=bs,shuffle=True)
num = bs*100
feature_tensor = torch.zeros(num,512*7*7).to(device)
label_tensor = torch.zeros(num).to(device)
i = 0
for x,l in dl:
with torch.no_grad():
f = vgg.features(x.to(device))
feature_tensor[i:i+bs] = f.view(bs,-1)
label_tensor[i:i+bs] = l
i+=bs
print('.',end='')
if i>=num:
break
###Output
....................................................................................................
###Markdown
Now we can define `vgg_dataset` that takes data from this tensor, split it into training and test sets using `random_split` function, and train a small one-layer dense classifier network on top of extracted features:
###Code
vgg_dataset = torch.utils.data.TensorDataset(feature_tensor,label_tensor.to(torch.long))
train_ds, test_ds = torch.utils.data.random_split(vgg_dataset,[700,100])
train_loader = torch.utils.data.DataLoader(train_ds,batch_size=32)
test_loader = torch.utils.data.DataLoader(test_ds,batch_size=32)
net = torch.nn.Sequential(torch.nn.Linear(512*7*7,2),torch.nn.LogSoftmax()).to(device)
history = train(net,train_loader,test_loader)
###Output
/anaconda/envs/py38_pytorch/lib/python3.8/site-packages/torch/nn/modules/container.py:119: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
input = module(input)
###Markdown
The result is great, we can distinguish between a cat and a dog with almost 98% probability! However, we have only tested this approach on a small subset of all images, because manual feature extraction seems to take a lot of time. Transfer learning using one VGG networkWe can also avoid manually pre-computing the features by using the original VGG-16 network as a whole during training. Let's look at the VGG-16 object structure:
###Code
print(vgg)
###Output
VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
###Markdown
You can see that the network contains:* feature extractor (`features`), comprised of a number of convolutional and pooling layers* average pooling layer (`avgpool`)* final `classifier`, consisting of several dense layers, which turns 25088 input features into 1000 classes (which is the number of classes in ImageNet)To train the end-to-end model that will classify our dataset, we need to:* **replace the final classifier** with the one that will produce required number of classes. In our case, we can use one `Linear` layer with 25088 inputs and 2 output neurons.* **freeze weights of convolutional feature extractor**, so that they are not trained. It is recommended to initially do this freezing, because otherwise untrained classifier layer can destroy the original pre-trained weights of convolutional extractor. Freezing weights can be accomplished by setting `requires_grad` property of all parameters to `False`
###Code
vgg.classifier = torch.nn.Linear(25088,2).to(device)
for x in vgg.features.parameters():
x.requires_grad = False
summary(vgg,(1, 3,244,244))
###Output
_____no_output_____
###Markdown
As you can see from the summary, this model contain around 15 million total parameters, but only 50k of them are trainable - those are the weights of classification layer. That is good, because we are able to fine-tune smaller number of parameters with smaller number of examples.Now let's train the model using our original dataset. This process will take a long time, so we will use `train_long` function that will print some intermediate results without waiting for the end of epoch. It is highly recommended to run this training on GPU-enabled compute!
###Code
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
train_loader = torch.utils.data.DataLoader(trainset,batch_size=16)
test_loader = torch.utils.data.DataLoader(testset,batch_size=16)
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90)
###Output
Epoch 0, minibatch 0: train acc = 0.5, train loss = 0.0431101992726326
Epoch 0, minibatch 90: train acc = 0.9539835164835165, train loss = 0.0960497070144821
###Markdown
It looks like we have obtained reasonably accurate cats vs. dogs classifier! Let's save it for future use!
###Code
torch.save(vgg,'data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
We can then load the model from file at any time. You may find it useful in case the next experiment destroys the model - you would not have to re-start from scratch.
###Code
vgg = torch.load('data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
Fine-tuning transfer learningIn the previous section, we have trained the final classifier layer to classify images in our own dataset. However, we did not re-train the feature extractor, and our model relied on the features that the model has learned on ImageNet data. If your objects visually differ from ordinary ImageNet images, this combination of features might not work best. Thus it makes sense to start training convolutional layers as well.To do that, we can unfreeze the convolutional filter parameters that we have previously frozen. > **Note:** It is important that you freeze parameters first and perform several epochs of training in order to stabilize weights in the classification layer. If you immediately start training end-to-end network with unfrozen parameters, large errors are likely to destroy the pre-trained weights in the convolutional layers.
###Code
for x in vgg.features.parameters():
x.requires_grad = True
###Output
_____no_output_____
###Markdown
After unfreezing, we can do a few more epochs of training. You can also select lower learning rate, in order to minimize the impact on the pre-trained weights. However, even with low learning rate, you can expect the accuracy to drop in the beginning of the training, until finally reaching slightly higher level than in the case of fixed weights.> **Note:** This training happens much slower, because we need to propagate gradients back through many layers of the network! You may want to watch the first few minibatches to see the tendency, and then stop the computation.
###Code
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90,lr=0.0001)
###Output
Epoch 0, minibatch 0: train acc = 1.0, train loss = 0.0
Epoch 0, minibatch 90: train acc = 0.8990384615384616, train loss = 0.2978392171335744
Epoch 0, minibatch 180: train acc = 0.9060773480662984, train loss = 0.1658294214069514
Epoch 0, minibatch 270: train acc = 0.9102859778597786, train loss = 0.11819224340009514
Epoch 0, minibatch 360: train acc = 0.9191481994459834, train loss = 0.09244130522920814
Epoch 0, minibatch 450: train acc = 0.9261363636363636, train loss = 0.07583886292451236
Epoch 0, minibatch 540: train acc = 0.928373382624769, train loss = 0.06537413817456822
Epoch 0, minibatch 630: train acc = 0.9318541996830428, train loss = 0.057419379426257924
Epoch 0, minibatch 720: train acc = 0.9361130374479889, train loss = 0.05114534460059813
Epoch 0, minibatch 810: train acc = 0.938347718865598, train loss = 0.04657612246737968
Epoch 0, minibatch 900: train acc = 0.9407602663706992, train loss = 0.04258851655712403
Epoch 0, minibatch 990: train acc = 0.9431130171543896, train loss = 0.03927870595491257
Epoch 0, minibatch 1080: train acc = 0.945536540240518, train loss = 0.03652716609309053
Epoch 0, minibatch 1170: train acc = 0.9463065755764304, train loss = 0.03445258006186286
Epoch 0 done, validation acc = 0.974389755902361, validation loss = 0.005457923144233279
###Markdown
Other computer vision modelsVGG-16 is one of the simplest computer vision architectures. `torchvision` package provides many more pre-trained networks. The most frequently used ones among those are **ResNet** architectures, developed by Microsoft, and **Inception** by Google. For example, let's explore the architecture of the simplest ResNet-18 model (ResNet is a family of models with different depth, you can try experimenting with ResNet-151 if you want to see what a really deep model looks like):
###Code
resnet = torchvision.models.resnet18()
print(resnet)
###Output
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer2): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer4): Sequential(
(0): BasicBlock(
(conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(fc): Linear(in_features=512, out_features=1000, bias=True)
)
###Markdown
Pre-Trained Models and Transfer LearningTraining CNNs can take a lot of time, and a lot of data is required for that task. However, much of the time is spent to learn the best low-level filters that a network is using to extract patterns from images. A natural question arises - can we use a neural network trained on one dataset and adapt it to classifyling different images without full training process?This approach is called **transfer learning**, because we transfer some knowledge from one neural network model to another. In transfer learning, we typically start with a pre-trained model, which has been trained on some large image dataset, such as **ImageNet**. Those models can already do a good job extracting different features from generic images, and in many cases just building a classifier on top of those extracted features can yield a good result.
###Code
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torchinfo import summary
import numpy as np
import os
from pytorchcv import train, plot_results, display_dataset, train_long, check_image_dir
###Output
_____no_output_____
###Markdown
Cats vs. Dogs DatasetIn this unit, we will solve a real-life problem of classifying images of cats and dogs. For this reason, we will use [Kaggle Cats vs. Dogs Dataset](https://www.kaggle.com/c/dogs-vs-cats), which can also be downloaded [from Microsoft](https://www.microsoft.com/en-us/download/details.aspx?id=54765).Let's download this dataset and extract it into `data` directory (this process may take some time!):
###Code
if not os.path.exists('data/kagglecatsanddogs_3367a.zip'):
!wget -P data -q http://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip
import zipfile
if not os.path.exists('data/PetImages'):
with zipfile.ZipFile('data/kagglecatsanddogs_3367a.zip', 'r') as zip_ref:
zip_ref.extractall('data')
###Output
_____no_output_____
###Markdown
Unfortunately, there are some corrupt image files in the dataset. We need to do quick cleaning to check for corrupted files. In order not to clobber this tutorial, we moved the code to verify dataset into a module.
###Code
check_image_dir('data/PetImages/Cat/*.jpg')
check_image_dir('data/PetImages/Dog/*.jpg')
###Output
Corrupt image: data/PetImages/Cat/666.jpg
Corrupt image: data/PetImages/Dog/11702.jpg
###Markdown
Next, let's load the images into PyTorch dataset, convering them to tensors and doing some normalization. We will apply `std_normalize` transform to bring images to the range expected by pre-trained VGG network:
###Code
std_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trans = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
std_normalize])
dataset = torchvision.datasets.ImageFolder('data/PetImages',transform=trans)
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
display_dataset(dataset)
###Output
_____no_output_____
###Markdown
Pre-Trained ModelsThere are many different pre-trianed models available inside `torchvision` module, and even more models can be found on the Internet. Let's see how simplest VGG-16 model can be loaded and used:
###Code
vgg = torchvision.models.vgg16(pretrained=True)
sample_image = dataset[0][0].unsqueeze(0)
res = vgg(sample_image)
print(res[0].argmax())
###Output
tensor(282)
###Markdown
The result that we have received is a number of an ImageNet class, which can be looked up [here](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). We can use the following code to automatically load this class table and return the result:
###Code
import json, requests
class_map = json.loads(requests.get("https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json").text)
class_map = { int(k) : v for k,v in class_map.items() }
class_map[res[0].argmax().item()]
###Output
_____no_output_____
###Markdown
Let's also see the architecture of the VGG-16 network:
###Code
summary(vgg,input_size=(1,3,224,224))
###Output
_____no_output_____
###Markdown
In addition to the layers we already know, there is one more layer type called **Dropout**. These layers act as a **regularization** technique: during training, some proportion of the neurons in the previous layer (the fraction is given by the parameter `p`; 50% in VGG-16) is randomly discarded, and training happens without them. This helps get the optimization process out of local minima and distributes decision power between different neural paths, which improves the overall stability of the network.

GPU Computations

Deep neural networks, such as VGG-16 and other, more modern architectures, require quite a lot of computational power to run, so it makes sense to use GPU acceleration if it is available. To do so, we need to explicitly move all tensors involved in the computation to the GPU. The usual pattern is to check GPU availability in the code and define a `device` variable that points to the computational device - either GPU or CPU.
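Before moving on to GPU computations, here is a quick hypothetical sketch (not part of the original notebook) of dropout's train/eval behavior:
###Code
# Hypothetical illustration of nn.Dropout: in training mode some activations are
# zeroed out (the survivors are scaled by 1/(1-p)); in evaluation mode it is a no-op
drop = nn.Dropout(p=0.5)
x = torch.ones(1, 8)
drop.train()
print(drop(x)) # roughly half the entries are 0, the rest are 2.0
drop.eval()
print(drop(x)) # all ones
###Output
_____no_output_____
###Markdown
Now let's check for a GPU and move the computation there: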
###Code
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Doing computations on device = {}'.format(device))
vgg.to(device)
sample_image = sample_image.to(device)
vgg(sample_image).argmax()
###Output
Doing computations on device = cuda
###Markdown
Extracting VGG features

If we want to use VGG-16 to extract features from our images, we need the model without the final classification layers. This "feature extractor" is available as the `vgg.features` submodule:
###Code
res = vgg.features(sample_image).cpu()
plt.figure(figsize=(15,3))
plt.imshow(res.detach().view(-1,512))
print(res.size())
###Output
torch.Size([1, 512, 7, 7])
###Markdown
The dimension of the feature tensor is 512x7x7, but in order to visualize it we had to reshape it into 2D form.

Now let's see whether those features can be used to classify images. We will manually take a portion of the images (800 in our case) and pre-compute their feature vectors. We will store the result in one big tensor called `feature_tensor`, and the labels in `label_tensor`:
###Code
bs = 8
dl = torch.utils.data.DataLoader(dataset,batch_size=bs,shuffle=True)
num = bs*100
feature_tensor = torch.zeros(num,512*7*7).to(device)
label_tensor = torch.zeros(num).to(device)
i = 0
for x,l in dl:
with torch.no_grad():
f = vgg.features(x.to(device))
feature_tensor[i:i+bs] = f.view(bs,-1)
label_tensor[i:i+bs] = l
i+=bs
print('.',end='')
if i>=num:
break
###Output
....................................................................................................
###Markdown
Now we can define a `vgg_dataset` that takes its data from this tensor, split it into training and test sets using the `random_split` function, and train a small one-layer dense classifier network on top of the extracted features:
###Code
vgg_dataset = torch.utils.data.TensorDataset(feature_tensor,label_tensor.to(torch.long))
train_ds, test_ds = torch.utils.data.random_split(vgg_dataset,[700,100])
train_loader = torch.utils.data.DataLoader(train_ds,batch_size=32)
test_loader = torch.utils.data.DataLoader(test_ds,batch_size=32)
net = torch.nn.Sequential(torch.nn.Linear(512*7*7,2),torch.nn.LogSoftmax()).to(device)
history = train(net,train_loader,test_loader)
###Output
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/torch/nn/modules/container.py:100: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
input = module(input)
###Markdown
The result is great: we can distinguish between a cat and a dog with almost 98% accuracy! However, we have only tested this approach on a small subset of all images, because manual feature extraction takes a lot of time.

Transfer Learning using One VGG Network

We can also avoid pre-computing the features manually by using the original VGG-16 network as a whole during training. Let's look at the VGG-16 object structure:
###Code
print(vgg)
###Output
VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
###Markdown
You can see that the network contains:
* a feature extractor (`features`), comprised of a number of convolutional and pooling layers
* an average pooling layer (`avgpool`)
* a final `classifier`, consisting of several dense layers, which turns 25088 input features into 1000 classes (the number of classes in ImageNet)

To train an end-to-end model that will classify our dataset, we need to:
* **replace the final classifier** with one that produces the required number of classes. In our case, we can use a single `Linear` layer with 25088 inputs and 2 output neurons.
* **freeze the weights of the convolutional feature extractor**, so that they are not trained. It is recommended to do this freezing first, because otherwise the untrained classifier layer can destroy the original pre-trained weights of the convolutional extractor. Freezing the weights is accomplished by setting the `requires_grad` property of all parameters to `False`:
###Code
vgg.classifier = torch.nn.Linear(25088,2).to(device)
for x in vgg.features.parameters():
x.requires_grad = False
summary(vgg,(3,244,244))
###Output
_____no_output_____
###Markdown
As you can see from the summary, this model contains around 15 million parameters in total, but only 50k of them are trainable - those are the weights of the classification layer. That is good, because we can fine-tune a smaller number of parameters with a smaller number of examples.

Now let's train the model on our original dataset. This process will take a long time, so we will use the `train_long` function, which prints intermediate results without waiting for the end of the epoch. It is highly recommended to run this training on GPU-enabled compute!
###Code
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
train_loader = torch.utils.data.DataLoader(trainset,batch_size=16)
test_loader = torch.utils.data.DataLoader(testset,batch_size=16)
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90)
###Output
Epoch 0, minibatch 0: train acc = 0.5625, train loss = 0.045165419578552246
Epoch 0, minibatch 90: train acc = 0.945054945054945, train loss = 0.10654432694990557
Epoch 0, minibatch 180: train acc = 0.9558011049723757, train loss = 0.10983258452863325
Epoch 0, minibatch 270: train acc = 0.9612546125461254, train loss = 0.11056054034356262
Epoch 0, minibatch 360: train acc = 0.9657202216066482, train loss = 0.09974451672667611
Epoch 0, minibatch 450: train acc = 0.9667405764966741, train loss = 0.10326384652215996
Epoch 0, minibatch 540: train acc = 0.9695009242144177, train loss = 0.1081675954314565
###Markdown
It looks like we have obtained a reasonably accurate cats vs. dogs classifier! Let's save it for future use!
###Code
torch.save(vgg,'data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
We can then load the model from the file at any time. You may find this useful in case the next experiment destroys the model - you would not have to restart from scratch.
###Code
vgg = torch.load('data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
Fine-Tuning Transfer Learning

In the previous section, we trained the final classifier layer to classify images in our own dataset. However, we did not re-train the feature extractor, so our model relied on the features that it had learnt on ImageNet data. If your objects visually differ from ordinary ImageNet images, this combination of features might not work best. Thus it makes sense to start training the convolutional layers as well.

To do that, we can unfreeze the convolutional filter parameters that we previously froze.

> **Note:** It is important that you freeze the parameters first and perform several epochs of training in order to stabilize the weights in the classification layer. If you immediately start training the end-to-end network with unfrozen parameters, large errors are likely to destroy the pre-trained weights in the convolutional layers.
###Code
for x in vgg.features.parameters():
x.requires_grad = True
###Output
_____no_output_____
###Markdown
After unfreezing, we can do a few more epochs of training. You can also select a lower learning rate in order to minimize the impact on the pre-trained weights. However, even with a low learning rate, you can expect the accuracy to drop at the beginning of training, until it finally reaches a level slightly higher than in the fixed-weights case.

> **Note:** This training happens much more slowly, because we need to propagate gradients back through many layers of the network! You may want to watch the first few minibatches to see the trend, and then stop the computation.
###Code
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90,lr=0.0001)
###Output
Epoch 0, minibatch 0: train acc = 0.9375, train loss = 0.05164256691932678
Epoch 0, minibatch 90: train acc = 0.9237637362637363, train loss = 0.22198507288000086
Epoch 0, minibatch 180: train acc = 0.9350828729281768, train loss = 0.1316179834018096
Epoch 0, minibatch 270: train acc = 0.9446494464944649, train loss = 0.09719027980227311
Epoch 0, minibatch 360: train acc = 0.9510041551246537, train loss = 0.07776963083367598
Epoch 0, minibatch 450: train acc = 0.9542682926829268, train loss = 0.0652109091139157
Epoch 0, minibatch 540: train acc = 0.9554066543438078, train loss = 0.057144408305339144
Epoch 0, minibatch 630: train acc = 0.9562202852614897, train loss = 0.051035371710872496
Epoch 0, minibatch 720: train acc = 0.9535367545076283, train loss = 0.048412391778996186
Epoch 0, minibatch 810: train acc = 0.9558415536374846, train loss = 0.043707310197973664
Epoch 0, minibatch 900: train acc = 0.9573390677025527, train loss = 0.04010265490058789
Epoch 0, minibatch 990: train acc = 0.9588168516649849, train loss = 0.03708902353234777
Epoch 0, minibatch 1080: train acc = 0.9598751156336726, train loss = 0.034591798314774734
Epoch 0, minibatch 1170: train acc = 0.9600234842015372, train loss = 0.03259906394170147
Epoch 0 done, validation acc = 0.977390956382553, validation loss = 0.004988700878911134
###Markdown
Other Computer Vision Models

VGG-16 is one of the simplest computer vision architectures. The `torchvision` package provides many more pre-trained networks. The most frequently used among them are the **ResNet** architectures, developed by Microsoft, and **Inception**, by Google. For example, let's explore the architecture of the simplest ResNet-18 model (ResNet is a family of models of different depths; you can experiment with ResNet-152 if you want to see what a really deep model looks like):
###Code
resnet = torchvision.models.resnet18()
print(resnet)
###Output
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer2): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer4): Sequential(
(0): BasicBlock(
(conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(fc): Linear(in_features=512, out_features=1000, bias=True)
)
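###Markdown
As a hedged sketch (hypothetical code, not from the original notebook), the same transfer-learning recipe carries over to ResNet: freeze the backbone and replace the final `fc` layer, whose `in_features` is 512 for ResNet-18, as the printout above shows:
###Code
# Hypothetical sketch: preparing ResNet-18 for 2-class transfer learning
resnet = torchvision.models.resnet18(pretrained=True)
for p in resnet.parameters():
    p.requires_grad = False # freeze the pre-trained backbone
resnet.fc = torch.nn.Linear(512, 2) # new trainable head (fc in_features = 512)
resnet = resnet.to(device)
###Output
_____no_output_____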
###Markdown
Pre-Trained Models and Transfer Learning

Training CNNs can take a lot of time, and a lot of data is required for that task. However, much of the time is spent learning the best low-level filters that a network uses to extract patterns from images. A natural question arises: can we take a neural network trained on one dataset and adapt it to classify different images without the full training process?

This approach is called **transfer learning**, because we transfer some knowledge from one neural network model to another. In transfer learning, we typically start with a pre-trained model that has been trained on some large image dataset, such as **ImageNet**. Such models can already do a good job of extracting features from generic images, and in many cases just building a classifier on top of those extracted features yields a good result.
###Code
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torchsummary import summary
import numpy as np
import os
from pytorchcv import train, plot_results, display_dataset, train_long, check_image_dir
###Output
_____no_output_____
###Markdown
Cats vs. Dogs Dataset

In this unit, we will solve a real-life problem: classifying images of cats and dogs. For this purpose, we will use the [Kaggle Cats vs. Dogs Dataset](https://www.kaggle.com/c/dogs-vs-cats), which can also be downloaded [from Microsoft](https://www.microsoft.com/en-us/download/details.aspx?id=54765).

Let's download this dataset and extract it into the `data` directory (this process may take some time!):
###Code
if not os.path.exists('data/kagglecatsanddogs_3367a.zip'):
!wget -P data -q http://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip
import zipfile
if not os.path.exists('data/PetImages'):
with zipfile.ZipFile('data/kagglecatsanddogs_3367a.zip', 'r') as zip_ref:
zip_ref.extractall('data')
###Output
_____no_output_____
###Markdown
Unfortunately, there are some corrupt image files in the dataset, so we need to do a quick check for them. In order not to clutter this tutorial, we have moved the dataset-verification code into a module.
###Code
check_image_dir('data/PetImages/Cat/*.jpg')
check_image_dir('data/PetImages/Dog/*.jpg')
###Output
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:770: UserWarning: Possibly corrupt EXIF data. Expecting to read 32 bytes but only got 0. Skipping tag 270
" Skipping tag %s" % (size, len(data), tag)
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:770: UserWarning: Possibly corrupt EXIF data. Expecting to read 5 bytes but only got 0. Skipping tag 271
" Skipping tag %s" % (size, len(data), tag)
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:770: UserWarning: Possibly corrupt EXIF data. Expecting to read 8 bytes but only got 0. Skipping tag 272
" Skipping tag %s" % (size, len(data), tag)
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:770: UserWarning: Possibly corrupt EXIF data. Expecting to read 8 bytes but only got 0. Skipping tag 282
" Skipping tag %s" % (size, len(data), tag)
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:770: UserWarning: Possibly corrupt EXIF data. Expecting to read 8 bytes but only got 0. Skipping tag 283
" Skipping tag %s" % (size, len(data), tag)
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:770: UserWarning: Possibly corrupt EXIF data. Expecting to read 20 bytes but only got 0. Skipping tag 306
" Skipping tag %s" % (size, len(data), tag)
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:770: UserWarning: Possibly corrupt EXIF data. Expecting to read 48 bytes but only got 0. Skipping tag 532
" Skipping tag %s" % (size, len(data), tag)
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/PIL/TiffImagePlugin.py:788: UserWarning: Corrupt EXIF data. Expecting to read 2 bytes but only got 0.
warnings.warn(str(msg))
###Markdown
Next, let's load the images into a PyTorch dataset, converting them to tensors and applying some normalization. We apply the `std_normalize` transform to bring the images into the range expected by the pre-trained VGG network:
###Code
std_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trans = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
std_normalize])
dataset = torchvision.datasets.ImageFolder('data/PetImages',transform=trans)
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
display_dataset(dataset)
###Output
_____no_output_____
###Markdown
Pre-Trained Models

There are many different pre-trained models available inside the `torchvision` module, and even more can be found on the Internet. Let's see how the simplest VGG-16 model can be loaded and used:
###Code
vgg = torchvision.models.vgg16(pretrained=True)
sample_image = dataset[0][0].unsqueeze(0)
res = vgg(sample_image)
print(res[0].argmax())
###Output
tensor(281)
###Markdown
The result we have received is the number of an ImageNet class, which can be looked up [here](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). We can use the following code to automatically load this class table and look up the result:
###Code
import json, requests
class_map = json.loads(requests.get("https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json").text)
class_map = { int(k) : v for k,v in class_map.items() }
class_map[res[0].argmax().item()]
###Output
_____no_output_____
###Markdown
Let's also see the architecture of the VGG-16 network:
###Code
summary(vgg,input_size=(3,224,224),device='cpu')
###Output
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 224, 224] 1,792
ReLU-2 [-1, 64, 224, 224] 0
Conv2d-3 [-1, 64, 224, 224] 36,928
ReLU-4 [-1, 64, 224, 224] 0
MaxPool2d-5 [-1, 64, 112, 112] 0
Conv2d-6 [-1, 128, 112, 112] 73,856
ReLU-7 [-1, 128, 112, 112] 0
Conv2d-8 [-1, 128, 112, 112] 147,584
ReLU-9 [-1, 128, 112, 112] 0
MaxPool2d-10 [-1, 128, 56, 56] 0
Conv2d-11 [-1, 256, 56, 56] 295,168
ReLU-12 [-1, 256, 56, 56] 0
Conv2d-13 [-1, 256, 56, 56] 590,080
ReLU-14 [-1, 256, 56, 56] 0
Conv2d-15 [-1, 256, 56, 56] 590,080
ReLU-16 [-1, 256, 56, 56] 0
MaxPool2d-17 [-1, 256, 28, 28] 0
Conv2d-18 [-1, 512, 28, 28] 1,180,160
ReLU-19 [-1, 512, 28, 28] 0
Conv2d-20 [-1, 512, 28, 28] 2,359,808
ReLU-21 [-1, 512, 28, 28] 0
Conv2d-22 [-1, 512, 28, 28] 2,359,808
ReLU-23 [-1, 512, 28, 28] 0
MaxPool2d-24 [-1, 512, 14, 14] 0
Conv2d-25 [-1, 512, 14, 14] 2,359,808
ReLU-26 [-1, 512, 14, 14] 0
Conv2d-27 [-1, 512, 14, 14] 2,359,808
ReLU-28 [-1, 512, 14, 14] 0
Conv2d-29 [-1, 512, 14, 14] 2,359,808
ReLU-30 [-1, 512, 14, 14] 0
MaxPool2d-31 [-1, 512, 7, 7] 0
AdaptiveAvgPool2d-32 [-1, 512, 7, 7] 0
Linear-33 [-1, 4096] 102,764,544
ReLU-34 [-1, 4096] 0
Dropout-35 [-1, 4096] 0
Linear-36 [-1, 4096] 16,781,312
ReLU-37 [-1, 4096] 0
Dropout-38 [-1, 4096] 0
Linear-39 [-1, 1000] 4,097,000
================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 218.78
Params size (MB): 527.79
Estimated Total Size (MB): 747.15
----------------------------------------------------------------
###Markdown
In addition to the layers we already know, there is one more layer type called **Dropout**. These layers act as a **regularization** technique: during training, some proportion of the neurons in the previous layer (the fraction is given by the parameter `p`; 50% in VGG-16) is randomly discarded, and training happens without them. This helps get the optimization process out of local minima and distributes decision power between different neural paths, which improves the overall stability of the network.

GPU Computations

Deep neural networks, such as VGG-16 and other, more modern architectures, require quite a lot of computational power to run, so it makes sense to use GPU acceleration if it is available. To do so, we need to explicitly move all tensors involved in the computation to the GPU. The usual pattern is to check GPU availability in the code and define a `device` variable that points to the computational device - either GPU or CPU.
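Before moving on to GPU computations, here is a quick hypothetical sketch (not part of the original notebook) of dropout's train/eval behavior:
###Code
# Hypothetical illustration of nn.Dropout: in training mode some activations are
# zeroed out (the survivors are scaled by 1/(1-p)); in evaluation mode it is a no-op
drop = nn.Dropout(p=0.5)
x = torch.ones(1, 8)
drop.train()
print(drop(x)) # roughly half the entries are 0, the rest are 2.0
drop.eval()
print(drop(x)) # all ones
###Output
_____no_output_____
###Markdown
Now let's check for a GPU and move the computation there: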
###Code
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Doing computations on device = {}'.format(device))
vgg.to(device)
sample_image = sample_image.to(device)
vgg(sample_image).argmax()
###Output
Doing computations on device = cuda
###Markdown
Extracting VGG features

If we want to use VGG-16 to extract features from our images, we need the model without the final classification layers. This "feature extractor" is available as the `vgg.features` submodule:
###Code
res = vgg.features(sample_image).cpu()
plt.figure(figsize=(15,3))
plt.imshow(res.detach().view(-1,512))
print(res.size())
###Output
torch.Size([1, 512, 7, 7])
###Markdown
The dimension of the feature tensor is 512x7x7, but in order to visualize it we had to reshape it into 2D form.

Now let's see whether those features can be used to classify images. We will manually take a portion of the images (800 in our case) and pre-compute their feature vectors. We will store the result in one big tensor called `feature_tensor`, and the labels in `label_tensor`:
###Code
bs = 8
dl = torch.utils.data.DataLoader(dataset,batch_size=bs,shuffle=True)
num = bs*100
feature_tensor = torch.zeros(num,512*7*7).to(device)
label_tensor = torch.zeros(num).to(device)
i = 0
for x,l in dl:
with torch.no_grad():
f = vgg.features(x.to(device))
feature_tensor[i:i+bs] = f.view(bs,-1)
label_tensor[i:i+bs] = l
i+=bs
print('.',end='')
if i>=num:
break
###Output
....................................................................................................
###Markdown
Now we can define a `vgg_dataset` that takes its data from this tensor, split it into training and test sets using the `random_split` function, and train a small one-layer dense classifier network on top of the extracted features:
###Code
vgg_dataset = torch.utils.data.TensorDataset(feature_tensor,label_tensor.to(torch.long))
train_ds, test_ds = torch.utils.data.random_split(vgg_dataset,[700,100])
train_loader = torch.utils.data.DataLoader(train_ds,batch_size=32)
test_loader = torch.utils.data.DataLoader(test_ds,batch_size=32)
net = torch.nn.Sequential(torch.nn.Linear(512*7*7,2),torch.nn.LogSoftmax()).to(device)
history = train(net,train_loader,test_loader)
###Output
/anaconda/envs/py37_pytorch/lib/python3.7/site-packages/torch/nn/modules/container.py:100: UserWarning: Implicit dimension choice for log_softmax has been deprecated. Change the call to include dim=X as an argument.
input = module(input)
###Markdown
The result is great: we can distinguish between a cat and a dog with almost 98% accuracy! However, we have only tested this approach on a small subset of all images, because manual feature extraction takes a lot of time.

Transfer Learning using One VGG Network

We can also avoid pre-computing the features manually by using the original VGG-16 network as a whole during training. Let's look at the VGG-16 object structure:
###Code
print(vgg)
###Output
VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
###Markdown
You can see that the network contains:
* a feature extractor (`features`), comprised of a number of convolutional and pooling layers
* an average pooling layer (`avgpool`)
* a final `classifier`, consisting of several dense layers, which turns 25088 input features into 1000 classes (the number of classes in ImageNet)

To train an end-to-end model that will classify our dataset, we need to:
* **replace the final classifier** with one that produces the required number of classes. In our case, we can use a single `Linear` layer with 25088 inputs and 2 output neurons.
* **freeze the weights of the convolutional feature extractor**, so that they are not trained. It is recommended to do this freezing first, because otherwise the untrained classifier layer can destroy the original pre-trained weights of the convolutional extractor. Freezing the weights is accomplished by setting the `requires_grad` property of all parameters to `False`:
###Code
vgg.classifier = torch.nn.Linear(25088,2).to(device)
for x in vgg.features.parameters():
x.requires_grad = False
summary(vgg,(3,244,244))
###Output
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 244, 244] 1,792
ReLU-2 [-1, 64, 244, 244] 0
Conv2d-3 [-1, 64, 244, 244] 36,928
ReLU-4 [-1, 64, 244, 244] 0
MaxPool2d-5 [-1, 64, 122, 122] 0
Conv2d-6 [-1, 128, 122, 122] 73,856
ReLU-7 [-1, 128, 122, 122] 0
Conv2d-8 [-1, 128, 122, 122] 147,584
ReLU-9 [-1, 128, 122, 122] 0
MaxPool2d-10 [-1, 128, 61, 61] 0
Conv2d-11 [-1, 256, 61, 61] 295,168
ReLU-12 [-1, 256, 61, 61] 0
Conv2d-13 [-1, 256, 61, 61] 590,080
ReLU-14 [-1, 256, 61, 61] 0
Conv2d-15 [-1, 256, 61, 61] 590,080
ReLU-16 [-1, 256, 61, 61] 0
MaxPool2d-17 [-1, 256, 30, 30] 0
Conv2d-18 [-1, 512, 30, 30] 1,180,160
ReLU-19 [-1, 512, 30, 30] 0
Conv2d-20 [-1, 512, 30, 30] 2,359,808
ReLU-21 [-1, 512, 30, 30] 0
Conv2d-22 [-1, 512, 30, 30] 2,359,808
ReLU-23 [-1, 512, 30, 30] 0
MaxPool2d-24 [-1, 512, 15, 15] 0
Conv2d-25 [-1, 512, 15, 15] 2,359,808
ReLU-26 [-1, 512, 15, 15] 0
Conv2d-27 [-1, 512, 15, 15] 2,359,808
ReLU-28 [-1, 512, 15, 15] 0
Conv2d-29 [-1, 512, 15, 15] 2,359,808
ReLU-30 [-1, 512, 15, 15] 0
MaxPool2d-31 [-1, 512, 7, 7] 0
AdaptiveAvgPool2d-32 [-1, 512, 7, 7] 0
Linear-33 [-1, 2] 50,178
================================================================
Total params: 14,764,866
Trainable params: 50,178
Non-trainable params: 14,714,688
----------------------------------------------------------------
Input size (MB): 0.68
Forward/backward pass size (MB): 258.32
Params size (MB): 56.32
Estimated Total Size (MB): 315.32
----------------------------------------------------------------
###Markdown
As you can see from the summary, this model contains around 15 million parameters in total, but only 50k of them are trainable - those are the weights of the classification layer. That is good, because we can fine-tune a smaller number of parameters with a smaller number of examples.

Now let's train the model on our original dataset. This process will take a long time, so we will use the `train_long` function, which prints intermediate results without waiting for the end of the epoch. It is highly recommended to run this training on GPU-enabled compute!
###Code
trainset, testset = torch.utils.data.random_split(dataset,[20000,len(dataset)-20000])
train_loader = torch.utils.data.DataLoader(trainset,batch_size=16)
test_loader = torch.utils.data.DataLoader(testset,batch_size=16)
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90)
###Output
Epoch 0, minibatch 0: train acc = 0.5625, train loss = 0.04118568077683449
Epoch 0, minibatch 90: train acc = 0.9402472527472527, train loss = 0.15443821791764145
Epoch 0, minibatch 180: train acc = 0.9516574585635359, train loss = 0.12943799983072019
Epoch 0, minibatch 270: train acc = 0.9568726937269373, train loss = 0.12680998235611018
Epoch 0, minibatch 360: train acc = 0.9617382271468145, train loss = 0.1176932591150342
Epoch 0, minibatch 450: train acc = 0.9646618625277162, train loss = 0.11877366973132622
Epoch 0, minibatch 540: train acc = 0.9668438077634011, train loss = 0.11604859780471999
Epoch 0, minibatch 630: train acc = 0.9690966719492868, train loss = 0.10795477188521443
Epoch 0, minibatch 720: train acc = 0.9702669902912622, train loss = 0.11028558569708413
Epoch 0, minibatch 810: train acc = 0.9707151664611591, train loss = 0.11101691908077894
Epoch 0, minibatch 900: train acc = 0.9716287458379578, train loss = 0.11134163944358699
Epoch 0, minibatch 990: train acc = 0.9716826437941474, train loss = 0.1267101776708147
Epoch 0, minibatch 1080: train acc = 0.9725948196114709, train loss = 0.12673569078471902
Epoch 0, minibatch 1170: train acc = 0.9729397950469684, train loss = 0.1306172932039256
Epoch 0 done, validation acc = 0.9811924769907964, validation loss = 0.15237108026804472
###Markdown
It looks like we have obtained a reasonably accurate cats vs. dogs classifier! Let's save it for future use!
###Code
torch.save(vgg,'data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
We can then load the model from the file at any time. You may find this useful in case the next experiment destroys the model - you would not have to restart from scratch.
###Code
vgg = torch.load('data/cats_dogs.pth')
###Output
_____no_output_____
###Markdown
Fine-Tuning Transfer Learning

In the previous section, we trained the final classifier layer to classify images in our own dataset. However, we did not re-train the feature extractor, so our model relied on the features that it had learnt on ImageNet data. If your objects visually differ from ordinary ImageNet images, this combination of features might not work best. Thus it makes sense to start training the convolutional layers as well.

To do that, we can unfreeze the convolutional filter parameters that we previously froze.

> **Note:** It is important that you freeze the parameters first and perform several epochs of training in order to stabilize the weights in the classification layer. If you immediately start training the end-to-end network with unfrozen parameters, large errors are likely to destroy the pre-trained weights in the convolutional layers.
###Code
for x in vgg.features.parameters():
x.requires_grad = True
###Output
_____no_output_____
###Markdown
After unfreezing, we can do a few more epochs of training. You can also select a lower learning rate in order to minimize the impact on the pre-trained weights. However, even with a low learning rate, you can expect the accuracy to drop at the beginning of training, until it finally reaches a level slightly higher than in the fixed-weights case.

> **Note:** This training happens much more slowly, because we need to propagate gradients back through many layers of the network! You may want to watch the first few minibatches to see the trend, and then stop the computation.
###Code
train_long(vgg,train_loader,test_loader,loss_fn=torch.nn.CrossEntropyLoss(),epochs=1,print_freq=90,lr=0.0001)
###Output
Epoch 0, minibatch 0: train acc = 1.0, train loss = 0.0
Epoch 0, minibatch 90: train acc = 0.9004120879120879, train loss = 0.19138541588416466
Epoch 0, minibatch 180: train acc = 0.9181629834254144, train loss = 0.10758332521217304
Epoch 0, minibatch 270: train acc = 0.9268911439114391, train loss = 0.07839146575364679
Epoch 0, minibatch 360: train acc = 0.932825484764543, train loss = 0.06241319186139305
Epoch 0, minibatch 450: train acc = 0.9368070953436807, train loss = 0.0522826209565224
Epoch 0, minibatch 540: train acc = 0.9411968576709797, train loss = 0.044761597779674145
Epoch 0, minibatch 630: train acc = 0.9449286846275753, train loss = 0.039440200370389575
Epoch 0, minibatch 720: train acc = 0.9479022191400832, train loss = 0.03541238231897023
Epoch 0, minibatch 810: train acc = 0.9475955610357584, train loss = 0.03271689691320154
Epoch 0, minibatch 900: train acc = 0.9464483906770256, train loss = 0.030598789685045046
Epoch 0, minibatch 990: train acc = 0.9482214934409687, train loss = 0.028380287162711473
Epoch 0, minibatch 1080: train acc = 0.9495259019426457, train loss = 0.026479318779337527
Epoch 0, minibatch 1170: train acc = 0.950629803586678, train loss = 0.024938184313037067
Epoch 0 done, validation acc = 0.9573829531812725, validation loss = 0.0077711557950817045
###Markdown
Other Computer Vision Models

VGG-16 is one of the simplest computer vision architectures. The `torchvision` package provides many more pre-trained networks. The most frequently used among them are the **ResNet** architectures, developed by Microsoft, and **Inception**, by Google. For example, let's explore the architecture of the simplest ResNet-18 model (ResNet is a family of models of different depths; you can experiment with ResNet-152 if you want to see what a really deep model looks like):
###Code
resnet = torchvision.models.resnet18()
print(resnet)
###Output
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer2): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer4): Sequential(
(0): BasicBlock(
(conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(fc): Linear(in_features=512, out_features=1000, bias=True)
)
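###Markdown
As a hedged sketch (hypothetical code, not from the original notebook), the same transfer-learning recipe carries over to ResNet: freeze the backbone and replace the final `fc` layer, whose `in_features` is 512 for ResNet-18, as the printout above shows:
###Code
# Hypothetical sketch: preparing ResNet-18 for 2-class transfer learning
resnet = torchvision.models.resnet18(pretrained=True)
for p in resnet.parameters():
    p.requires_grad = False # freeze the pre-trained backbone
resnet.fc = torch.nn.Linear(512, 2) # new trainable head (fc in_features = 512)
resnet = resnet.to(device)
###Output
_____no_output_____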
Alpha_parameter_experiments.ipynb | ###Markdown
We first define functions that give us the competitive ratios of the respective algorithms.
###Code
def LAS_energy_ratio(_J_true, _J_pred, epsilon, alpha, dt):
#compute energy of LAS algorithm
J_true = copy.deepcopy(_J_true)
J_pred = copy.deepcopy(_J_pred)
speed_sol = LAS(J_pred, J_true, epsilon, dt, alpha)
energy_LAS = sum([s**alpha for s in speed_sol])*dt
    #compute the speed list and energy consumption of the optimal schedule of the true instance
J_true = copy.deepcopy(_J_true)
J_pred = copy.deepcopy(_J_pred)
optimal_alg_speed_list, _ = Optimal_Alg(J_true)
energy_optimal = compute_energy(optimal_alg_speed_list, alpha)
return float(energy_LAS)/energy_optimal
#returns the energy ratio AVR_energy/Optimal_energy
def AVR_energy_ratio(_J, alpha):
J = copy.deepcopy(_J)
#speed list of average rate
AVR_speed_list = Avg_rate(J)
#energy consumption of AVR
energy_AVR = compute_energy(AVR_speed_list, alpha)
J = copy.deepcopy(_J)
#speed list of the optimal schedule
optimal_alg_speed_list, _ = Optimal_Alg(J)
#energy consumption of the optimal schedule
energy_optimal = compute_energy(optimal_alg_speed_list, alpha)
return float(energy_AVR)/energy_optimal
#returns the energy ratio OA_energy/Optimal_energy
def OA_energy_ratio(_J, alpha):
J = copy.deepcopy(_J)
#speed list of Optimal Available
OA_speed_list = OptimalOnline(J)
#energy consumption of Optimal Available
energy_OA = sum([s**alpha for s in OA_speed_list])
J = copy.deepcopy(_J)
#speed list of the optimal schedule
optimal_alg_speed_list, _ = Optimal_Alg(J)
#energy consumption of the optimal schedule
energy_optimal = compute_energy(optimal_alg_speed_list, alpha)
return float(energy_OA)/energy_optimal
#returns the energy ratio BKP_energy/Optimal_energy
def BKP_energy_ratio(_J, granularity, alpha):
J = copy.deepcopy(_J)
#energy consumption of the BKP algorithm
energy_BKP = BKP_alg(J, granularity, alpha)
J = copy.deepcopy(_J)
#speed list of the optimal schedule
optimal_alg_speed_list, _ = Optimal_Alg(J)
#energy consumption of the optimal schedule
energy_optimal = compute_energy(optimal_alg_speed_list, alpha)
return float(energy_BKP)/energy_optimal
###Output
_____no_output_____
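###Markdown
For reference, all the ratios above use the standard speed-scaling energy model: running at speed $s$ for a time step $dt$ costs $s^\alpha \cdot dt$, so the energy of a discretized schedule is the sum over its speed list. A minimal self-contained sketch (hypothetical; the actual `compute_energy` is defined elsewhere in this repository):
###Code
# Hypothetical sketch of the power-alpha energy model (assumes a uniform time step dt)
def toy_energy(speed_list, alpha, dt=1.0):
    return sum(s**alpha for s in speed_list) * dt
print(toy_energy([1.0, 2.0, 0.5], alpha=3)) # 1 + 8 + 0.125 = 9.125
###Output
_____no_output_____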
###Markdown
Data preprocessing functions
###Code
def date_to_int(date):
date = int("".join(date.split('-')))
return date
def time_to_int(time):
    #drop the trailing character (e.g. 'Z') and encode "HH:MM:SS" as the
    #integer HH*10 + tens digit of the minutes, i.e. a ten-minute bucket
    time = time[:-1].split(':')
    time = time[0:2]
    time[1] = time[1][0]
    time = int("".join(time))
    return time
def minimum_granularity(lst):
#we assume that the input list is sorted
res = max(lst)
for i in range(1,len(lst)):
res = min(res, lst[i]-lst[i-1])
return res
def plot_histogram(l):
    #plot the number of occurrences of each distinct value in l, in increasing order of value
    x_y = [(x, l.count(x)) for x in set(l)]
    x_y = sorted(x_y, key=lambda t: t[0])
    ys = []
    for x, y in x_y:
        ys.append(y)
    xs = range(0, len(ys))
    plt.plot(xs, ys)
    plt.draw()
    return 0
def create_input(lst, D):
    #turn a list of discretized request times into a job instance:
    #job i+1 has weight = number of requests in the i-th time chunk,
    #release time i and deadline i+D
    time_weight = [(time, lst.count(time)) for time in set(lst)]
    time_weight = sorted(time_weight, key=lambda t: t[0])
    i = 0
    res = {}
    for time, weight in time_weight:
        res[i+1] = (weight, i, i+D)
        i+=1
    return res
def ranges(nums):
    #return the maximal runs of consecutive integers in nums as a list of (start, end) pairs
    nums = sorted(set(nums))
    gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s+1 < e]
    edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])
    return list(zip(edges, edges))
###Output
_____no_output_____
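###Markdown
For intuition, a small hypothetical example (not part of the original experiments) of what `create_input` produces: each distinct arrival time becomes one job whose weight is the number of requests at that time, with release time $i$ and deadline $i+D$:
###Code
# Hypothetical toy run of create_input
toy_times = [100, 100, 105, 110] # discretized request times
print(create_input(toy_times, 3))
# expected: {1: (2, 0, 3), 2: (1, 1, 4), 3: (1, 2, 5)}
###Output
_____no_output_____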
###Markdown
Reading the input file
###Code
with open(dataset) as fin:
i = 0
dates = []
times = []
date_and_time = []
print("I just started reading the file")
for line in fin:
if i == -1:
break
try:
date, time = line.split()[1].split("T")
except:
print("the problem occurs in line = ", i)
print(line)
if len(date) != 10:
print("a date is wrong")
print(date)
time = time_to_int(time)
date = date_to_int(date)
times.append(time)
dates.append(date)
date_and_time.append((date, time))
i+=1
print("I just finished reading the file...everything seems ok")
###Output
I just started reading the file
I just finished reading the file...everything seems ok
###Markdown
Discretization of the timeline; we keep only days with enough data

The discretization is in ten-minute chunks, and we keep only the days where all ten-minute chunks receive at least one request.
###Code
dates = list(set(dates))
dates = sorted(dates)
dates_dict = {}
for date in dates:
dates_dict[date] = []
for date, time in date_and_time:
dates_dict[date].append(time)
lens_in_a_day = []
for date in sorted(dates_dict.keys()):
dates_dict[date] = sorted(dates_dict[date])
len_of_the_day = len(set(dates_dict[date]))
if len_of_the_day < 144:
del dates_dict[date]
continue
lens_in_a_day.append(len_of_the_day)
#dates_lst maps integer i--->date
dates_lst = []
for date in sorted(dates_dict.keys()):
dates_lst.append(date)
num_of_days = len(dates_lst)
###Output
_____no_output_____
###Markdown
An example of how the loads on three different dates look:
###Code
date = 20090601
plot_histogram(dates_dict[date+100])
plot_histogram(dates_dict[date+101])
plot_histogram(dates_dict[date+102])
plt.show()
###Output
_____no_output_____
###Markdown
Experiments (this may take more than an hour)
###Code
D = 20
dt = 0.01
alphas = [3,6,9,12]
epsilons = [Fraction(1,100), Fraction(80,100)]
###Output
_____no_output_____
###Markdown
Experiments for $\alpha = 3$
###Code
y_LAS = {}
y_AVR = []
y_OA = []
alpha = alphas[0] # = 3
for epsilon in epsilons:
y_LAS[epsilon]=[]
for i in range(0,num_of_days-1, 1):
print("day=", i+1)
previous_day = dates_lst[i]
today = dates_lst[i+1]
J_pred = create_input(dates_dict[previous_day], D)
J_true = create_input(dates_dict[today], D)
AVR = AVR_energy_ratio(J_true, alpha)
OA = OA_energy_ratio(J_true, alpha)
print("AVG= ",AVR)
y_AVR.append(AVR)
print("OA= ",OA)
y_OA.append(OA)
for epsilon in epsilons:
LAS_scheduling = LAS_energy_ratio(J_true, J_pred, epsilon, alpha, dt)
print("LAS, $\epsilon$=", epsilon, "-->", LAS_scheduling)
y_LAS[epsilon].append(LAS_scheduling)
print("========")
###Output
day= 1
AVG= 1.4824002000840508
OA= 1.3782932094232627
LAS, $\epsilon$= 1/100 --> 1.229374937227341
LAS, $\epsilon$= 4/5 --> 1.2573794555025257
========
day= 2
AVG= 1.513920856429493
OA= 1.424085236073388
LAS, $\epsilon$= 1/100 --> 1.1851689543771335
LAS, $\epsilon$= 4/5 --> 1.2218032618216266
========
day= 3
AVG= 1.518574065440833
OA= 1.3258713296751432
LAS, $\epsilon$= 1/100 --> 1.1397062289218842
LAS, $\epsilon$= 4/5 --> 1.1818068209067762
========
day= 4
AVG= 1.4844515510099394
OA= 1.331386570653595
LAS, $\epsilon$= 1/100 --> 1.0679410265435376
LAS, $\epsilon$= 4/5 --> 1.0812228194856552
========
day= 5
AVG= 1.6921359069589235
OA= 1.392700926628663
LAS, $\epsilon$= 1/100 --> 1.1574261636139307
LAS, $\epsilon$= 4/5 --> 1.1485673168965755
========
day= 6
AVG= 1.7241619383316547
OA= 1.4012695766561636
LAS, $\epsilon$= 1/100 --> 1.1652478371958066
LAS, $\epsilon$= 4/5 --> 1.1422484716415275
========
day= 7
AVG= 1.5975930244324712
OA= 1.3260716137531718
LAS, $\epsilon$= 1/100 --> 1.334524791972887
LAS, $\epsilon$= 4/5 --> 1.3668370356251112
========
day= 8
AVG= 1.5451025748568585
OA= 1.3442289126886284
LAS, $\epsilon$= 1/100 --> 1.101943859999334
LAS, $\epsilon$= 4/5 --> 1.1316443062961858
========
day= 9
AVG= 1.5391753729450453
OA= 1.3202702602523513
LAS, $\epsilon$= 1/100 --> 1.0826766277669708
LAS, $\epsilon$= 4/5 --> 1.1043688446511855
========
day= 10
AVG= 1.5758199548458145
OA= 1.3990552497368631
LAS, $\epsilon$= 1/100 --> 1.0791730238383428
LAS, $\epsilon$= 4/5 --> 1.0984289683790407
========
day= 11
AVG= 1.5636669435799495
OA= 1.3371879335504606
LAS, $\epsilon$= 1/100 --> 1.133939747792612
LAS, $\epsilon$= 4/5 --> 1.1511773834520294
========
day= 12
AVG= 1.722472028106299
OA= 1.3938777162024927
LAS, $\epsilon$= 1/100 --> 1.164727338372885
LAS, $\epsilon$= 4/5 --> 1.1827678616566166
========
day= 13
AVG= 1.4441648876958835
OA= 1.3159540206547666
LAS, $\epsilon$= 1/100 --> 1.285617929303493
LAS, $\epsilon$= 4/5 --> 1.299437875019546
========
day= 14
AVG= 1.5022656351158614
OA= 1.3609041701059825
LAS, $\epsilon$= 1/100 --> 1.1056669719117425
LAS, $\epsilon$= 4/5 --> 1.1128626824374253
========
day= 15
AVG= 1.5386711286484154
OA= 1.322890680022159
LAS, $\epsilon$= 1/100 --> 1.1033864144281476
LAS, $\epsilon$= 4/5 --> 1.110307559716801
========
day= 16
AVG= 1.5967078659380078
OA= 1.3204160627245165
LAS, $\epsilon$= 1/100 --> 1.0806871895339905
LAS, $\epsilon$= 4/5 --> 1.1051838200202302
========
day= 17
AVG= 1.7051519177183105
OA= 1.3870385202277542
LAS, $\epsilon$= 1/100 --> 1.1158554445725772
LAS, $\epsilon$= 4/5 --> 1.1438215237342637
========
day= 18
AVG= 1.590232469464035
OA= 1.3838384334624243
LAS, $\epsilon$= 1/100 --> 1.1212416285724671
LAS, $\epsilon$= 4/5 --> 1.129314261491135
========
day= 19
AVG= 1.540049699571489
OA= 1.3453807110875033
LAS, $\epsilon$= 1/100 --> 1.365885292691191
LAS, $\epsilon$= 4/5 --> 1.3745306658407705
========
day= 20
AVG= 1.5595335933794838
OA= 1.322568563335031
LAS, $\epsilon$= 1/100 --> 1.1097118001246833
LAS, $\epsilon$= 4/5 --> 1.126939045308068
========
day= 21
AVG= 1.47070895054001
OA= 1.293204404842401
LAS, $\epsilon$= 1/100 --> 1.0549350991876987
LAS, $\epsilon$= 4/5 --> 1.0762472913380443
========
day= 22
AVG= 1.6034099856539235
OA= 1.3722500369540573
LAS, $\epsilon$= 1/100 --> 1.1315198387567207
LAS, $\epsilon$= 4/5 --> 1.1533654871956267
========
day= 23
AVG= 1.717870407579277
OA= 1.4154854147332463
LAS, $\epsilon$= 1/100 --> 1.1553900135757487
LAS, $\epsilon$= 4/5 --> 1.157342651183574
========
day= 24
AVG= 1.685419792971609
OA= 1.4309925106973265
LAS, $\epsilon$= 1/100 --> 1.1026935345117117
LAS, $\epsilon$= 4/5 --> 1.1220135233314108
========
day= 25
AVG= 1.512611901616585
OA= 1.3172134611324766
LAS, $\epsilon$= 1/100 --> 1.2859934181326491
LAS, $\epsilon$= 4/5 --> 1.288643759876031
========
day= 26
AVG= 1.5236004025372483
OA= 1.3248452205415828
LAS, $\epsilon$= 1/100 --> 1.1034764416955878
LAS, $\epsilon$= 4/5 --> 1.1100609843189693
========
day= 27
AVG= 1.590258155612399
OA= 1.3150547992281405
LAS, $\epsilon$= 1/100 --> 1.1110499915102765
LAS, $\epsilon$= 4/5 --> 1.1288382927791012
========
day= 28
AVG= 1.621828042696132
OA= 1.3278625300666609
LAS, $\epsilon$= 1/100 --> 1.108716356860749
LAS, $\epsilon$= 4/5 --> 1.1323864662085645
========
day= 29
AVG= 1.6496668948273752
OA= 1.4272387836336433
LAS, $\epsilon$= 1/100 --> 1.1731360034763496
LAS, $\epsilon$= 4/5 --> 1.209267069728132
========
day= 30
AVG= 1.6803204725729795
OA= 1.370063303733607
LAS, $\epsilon$= 1/100 --> 1.112743746478904
LAS, $\epsilon$= 4/5 --> 1.1261738571142623
========
day= 31
AVG= 1.7201800117048653
OA= 1.4222201746714553
LAS, $\epsilon$= 1/100 --> 1.151927123337785
LAS, $\epsilon$= 4/5 --> 1.137265545453496
========
day= 32
AVG= 1.5351628563329451
OA= 1.4055701942298833
LAS, $\epsilon$= 1/100 --> 1.2515240135499022
LAS, $\epsilon$= 4/5 --> 1.289009992350353
========
day= 33
AVG= 1.5288671874651332
OA= 1.30684266319432
LAS, $\epsilon$= 1/100 --> 1.1628210491121993
LAS, $\epsilon$= 4/5 --> 1.1497596647064905
========
day= 34
AVG= 1.4959204347041577
OA= 1.2997666679964768
LAS, $\epsilon$= 1/100 --> 1.0640654551767614
LAS, $\epsilon$= 4/5 --> 1.0790815220205865
========
day= 35
AVG= 1.52234687651847
OA= 1.3204912174153274
LAS, $\epsilon$= 1/100 --> 1.0651717257508149
LAS, $\epsilon$= 4/5 --> 1.082777869151016
========
day= 36
AVG= 1.5070501833946586
OA= 1.3046071251995555
LAS, $\epsilon$= 1/100 --> 1.0644921048217058
LAS, $\epsilon$= 4/5 --> 1.08648030564264
========
day= 37
AVG= 1.7320557063732365
OA= 1.4065679174141548
LAS, $\epsilon$= 1/100 --> 1.1840686419898623
LAS, $\epsilon$= 4/5 --> 1.214830105538579
========
day= 38
AVG= 1.6666169425652613
OA= 1.3992956295262022
LAS, $\epsilon$= 1/100 --> 1.1104793968389448
LAS, $\epsilon$= 4/5 --> 1.1170128830418802
========
day= 39
AVG= 1.4885234630980895
OA= 1.2834802145546185
LAS, $\epsilon$= 1/100 --> 1.2491513981076705
LAS, $\epsilon$= 4/5 --> 1.2585181093054454
========
day= 40
AVG= 1.6339771623426234
OA= 1.3600366562193724
LAS, $\epsilon$= 1/100 --> 1.112808345356134
LAS, $\epsilon$= 4/5 --> 1.1369164619812597
========
day= 41
AVG= 1.5592480226681114
OA= 1.2990481768053455
LAS, $\epsilon$= 1/100 --> 1.10795721225007
LAS, $\epsilon$= 4/5 --> 1.1193108322738532
========
day= 42
AVG= 1.5721003234575788
OA= 1.3382759669698772
LAS, $\epsilon$= 1/100 --> 1.0763520940323188
LAS, $\epsilon$= 4/5 --> 1.1039461566341628
========
day= 43
AVG= 1.4996661279900578
OA= 1.3283674131645578
LAS, $\epsilon$= 1/100 --> 1.065726541392903
LAS, $\epsilon$= 4/5 --> 1.0884977910768114
========
day= 44
AVG= 1.707983387494951
OA= 1.392185045904379
LAS, $\epsilon$= 1/100 --> 1.1127340307435782
LAS, $\epsilon$= 4/5 --> 1.1390405161763413
========
day= 45
AVG= 1.7195637206644407
OA= 1.443481382624125
LAS, $\epsilon$= 1/100 --> 1.1126290922596798
LAS, $\epsilon$= 4/5 --> 1.121419271489678
========
day= 46
AVG= 1.500670885093179
OA= 1.3472432120662359
LAS, $\epsilon$= 1/100 --> 1.2670058597786413
LAS, $\epsilon$= 4/5 --> 1.2597657434746048
========
day= 47
AVG= 1.4482091955265988
OA= 1.359317476528776
LAS, $\epsilon$= 1/100 --> 1.104225438423826
LAS, $\epsilon$= 4/5 --> 1.118480771133253
========
day= 48
AVG= 1.492728262048826
OA= 1.3137568584630226
LAS, $\epsilon$= 1/100 --> 1.0872636260049278
LAS, $\epsilon$= 4/5 --> 1.0943236513662256
========
day= 49
AVG= 1.5281982781160477
OA= 1.3729164880984766
LAS, $\epsilon$= 1/100 --> 1.093070024754543
LAS, $\epsilon$= 4/5 --> 1.1132559059695346
========
day= 50
AVG= 1.5089810336448335
OA= 1.3315222948545336
LAS, $\epsilon$= 1/100 --> 1.069813800324882
LAS, $\epsilon$= 4/5 --> 1.0970902279508292
========
day= 51
AVG= 1.6649636387099216
OA= 1.3669999357324356
LAS, $\epsilon$= 1/100 --> 1.115537646796744
LAS, $\epsilon$= 4/5 --> 1.128298181139902
========
day= 52
AVG= 1.7067373188960644
OA= 1.4293515365344411
LAS, $\epsilon$= 1/100 --> 1.1267002648251594
LAS, $\epsilon$= 4/5 --> 1.1397324634067
========
day= 53
AVG= 1.4931642054150183
OA= 1.2795767685480537
LAS, $\epsilon$= 1/100 --> 1.2746623225086084
LAS, $\epsilon$= 4/5 --> 1.2746514285799888
========
day= 54
###Markdown
Statistics for $\alpha = 3$
###Code
print("AVR has a mean competitive ratio of :", mean(y_AVR))
print("the worst competitive ratio of AVR is :", max(y_AVR))
print("=======")
print("Optimal Available has a mean competitive ratio of :", mean(y_OA))
print("the worst competitive ratio of Optimal Available is :", max(y_OA))
print("=======")
print("Statistics for the LAS algorithm")
epsilon = Fraction(1,100)
print("EPSILON = 1/100")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
print("========")
epsilon = Fraction(80,100)
print("EPSILON = 8/10")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
###Output
_____no_output_____
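###Markdown
The statistics cells for the remaining values of $\alpha$ below repeat this block verbatim. A small helper (a sketch; it assumes `mean` and `Fraction` are imported as in the surrounding cells) would avoid the duplication:
###Code
def print_statistics(y_AVR, y_OA, y_LAS):
    # Summarize the mean and worst-case competitive ratios of each algorithm.
    print("AVR has a mean competitive ratio of :", mean(y_AVR))
    print("the worst competitive ratio of AVR is :", max(y_AVR))
    print("=======")
    print("Optimal Available has a mean competitive ratio of :", mean(y_OA))
    print("the worst competitive ratio of Optimal Available is :", max(y_OA))
    print("=======")
    print("Statistics for the LAS algorithm")
    for epsilon in [Fraction(1, 100), Fraction(80, 100)]:
        print("EPSILON =", epsilon)
        print("the mean competitive ratio is :", mean(y_LAS[epsilon]))
        print("the worst competitive ratio is :", max(y_LAS[epsilon]))
###Output
_____no_output_____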
###Markdown
Experiments for $\alpha = 6$
###Code
y_LAS = {}
y_AVR = []
y_OA = []
alpha = alphas[1] # = 6
for epsilon in epsilons:
y_LAS[epsilon]=[]
for i in range(0,num_of_days-1, 1):
print("day=", i+1)
previous_day = dates_lst[i]
today = dates_lst[i+1]
J_pred = create_input(dates_dict[previous_day], D)
J_true = create_input(dates_dict[today], D)
AVR = AVR_energy_ratio(J_true, alpha)
OA = OA_energy_ratio(J_true, alpha)
print("AVG= ",AVR)
y_AVR.append(AVR)
print("OA= ",OA)
y_OA.append(OA)
for epsilon in epsilons:
LAS_scheduling = LAS_energy_ratio(J_true, J_pred, epsilon, alpha, dt)
print("LAS, $\epsilon$=", epsilon, "-->", LAS_scheduling)
y_LAS[epsilon].append(LAS_scheduling)
print("========")
###Output
_____no_output_____
###Markdown
Statistics for $\alpha = 6$
###Code
print("AVR has a mean competitive ratio of :", mean(y_AVR))
print("the worst competitive ratio of AVR is :", max(y_AVR))
print("=======")
print("Optimal Available has a mean competitive ratio of :", mean(y_OA))
print("the worst competitive ratio of Optimal Available is :", max(y_OA))
print("=======")
print("Statistics for the LAS algorithm")
epsilon = Fraction(1,100)
print("EPSILON = 1/100")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
print("========")
epsilon = Fraction(80,100)
print("EPSILON = 8/10")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
###Output
_____no_output_____
###Markdown
Experiments for $\alpha = 9$
###Code
y_LAS = {}
y_AVR = []
y_OA = []
alpha = alphas[2] #=9
for epsilon in epsilons:
y_LAS[epsilon]=[]
for i in range(0,num_of_days-1, 1):
print("day=", i+1)
previous_day = dates_lst[i]
today = dates_lst[i+1]
J_pred = create_input(dates_dict[previous_day], D)
J_true = create_input(dates_dict[today], D)
AVR = AVR_energy_ratio(J_true, alpha)
OA = OA_energy_ratio(J_true, alpha)
print("AVG= ",AVR)
y_AVR.append(AVR)
print("OA= ",OA)
y_OA.append(OA)
for epsilon in epsilons:
LAS_scheduling = LAS_energy_ratio(J_true, J_pred, epsilon, alpha, dt)
print("LAS, $\epsilon$=", epsilon, "-->", LAS_scheduling)
y_LAS[epsilon].append(LAS_scheduling)
print("========")
###Output
_____no_output_____
###Markdown
Statistics for $\alpha = 9$
###Code
print("AVR has a mean competitive ratio of :", mean(y_AVR))
print("the worst competitive ratio of AVR is :", max(y_AVR))
print("=======")
print("Optimal Available has a mean competitive ratio of :", mean(y_OA))
print("the worst competitive ratio of Optimal Available is :", max(y_OA))
print("=======")
print("Statistics for the LAS algorithm")
epsilon = Fraction(1,100)
print("EPSILON = 1/100")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
print("========")
epsilon = Fraction(80,100)
print("EPSILON = 8/10")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
###Output
_____no_output_____
###Markdown
Experiments for $\alpha = 12$
###Code
y_LAS = {}
y_AVR = []
y_OA = []
alpha = alphas[3] #=12
for epsilon in epsilons:
y_LAS[epsilon]=[]
for i in range(0,num_of_days-1, 1):
print("day=", i+1)
previous_day = dates_lst[i]
today = dates_lst[i+1]
J_pred = create_input(dates_dict[previous_day], D)
J_true = create_input(dates_dict[today], D)
AVR = AVR_energy_ratio(J_true, alpha)
OA = OA_energy_ratio(J_true, alpha)
print("AVG= ",AVR)
y_AVR.append(AVR)
print("OA= ",OA)
y_OA.append(OA)
for epsilon in epsilons:
LAS_scheduling = LAS_energy_ratio(J_true, J_pred, epsilon, alpha, dt)
print("LAS, $\epsilon$=", epsilon, "-->", LAS_scheduling)
y_LAS[epsilon].append(LAS_scheduling)
print("========")
###Output
_____no_output_____
###Markdown
Statistics for $\alpha = 12$
###Code
print("AVR has a mean competitive ratio of :", mean(y_AVR))
print("the worst competitive ratio of AVR is :", max(y_AVR))
print("=======")
print("Optimal Available has a mean competitive ratio of :", mean(y_OA))
print("the worst competitive ratio of Optimal Available is :", max(y_OA))
print("=======")
print("Statistics for the LAS algorithm")
epsilon = Fraction(1,100)
print("EPSILON = 1/100")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
print("========")
epsilon = Fraction(80,100)
print("EPSILON = 8/10")
the_max = max(y_LAS[epsilon])
the_mean = mean(y_LAS[epsilon])
print("the mean competitive ratio is :", the_mean)
print("the worst competitive ration is :", the_max)
###Output
_____no_output_____ |
Web_Scraping/RCS HTML to Text conversion.ipynb | ###Markdown
RCS HTML to Text conversion https://pypi.org/project/html2text/ Originally written by the late Aaron Swartz. This code is distributed under the GPLv3.
###Code
!pip install html2text
import html2text
print(html2text.html2text("<p><strong>Zed's</strong> dead baby, <em>Zed's</em> dead.</p>"))
## Some configuration
h = html2text.HTML2Text()
h.ignore_links = True
print(h.handle("<p>Hello, <a href='http://earth.google.com/'>world</a>!"))
h.ignore_links = False
print(h.handle("<p>Hello, <a href='http://earth.google.com/'>world</a>!"))
###Output
Hello, world!
Hello, [world](http://earth.google.com/)!
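###Markdown
A couple of other commonly used switches (`ignore_images` and `body_width` are standard html2text options; shown here as a small sketch):
###Code
h = html2text.HTML2Text()
h.ignore_images = True  # drop <img> tags from the output
h.body_width = 0        # disable hard line wrapping
print(h.handle("<h1>Title</h1><p>Some <b>bold</b> and <i>italic</i> text.</p>"))
###Output
_____no_output_____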
|
Validation/ModelA&Strided-CNN-A.ipynb | ###Markdown
###Code
# Mount Google Drive
from google.colab import drive # import drive from google colab
ROOT = "/content/drive" # default location for the drive
print(ROOT) # print content of ROOT (Optional)
drive.mount(ROOT) # we mount the google drive at /content/drive
from google.colab import files
uploaded = files.upload()
from Model import Model
try:
from torchsummary import summary
except ModuleNotFoundError:
!pip install -q torchsummary
from torchsummary import summary
try:
import torch
except ModuleNotFoundError:
from os import path
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu'
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.3.1-{platform}-linux_x86_64.whl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
try:
import torchvision
except ModuleNotFoundError:
!pip install -q torchvision
from torchvision import datasets, transforms
cuda = True
train_batch_size = 256
test_batch_size = 124
best_loss = float("inf")
best_epoch = -1
dataset_path = './cifar10'
gsync_save = True
if gsync_save:
try:
import utils
except ModuleNotFoundError:
!wget https://raw.githubusercontent.com/StefOe/colab-pytorch-utils/HEAD/utils.py
import utils
gsync = utils.GDriveSync()
import numpy as np
np.random.seed(1)
indexSet=np.random.permutation(50000)
prob=0.8
indexTrain=indexSet[0:int(50000*prob)]
indexValid=indexSet[int(50000*prob):-1]
cuda = cuda and torch.cuda.is_available()
CIFAR10_Train = datasets.CIFAR10(root=dataset_path, train=True, download=True)
train_mean = CIFAR10_Train.data[indexTrain].mean(axis=(0,1,2))/255 # [0.49139968 0.48215841 0.44653091]
train_std = CIFAR10_Train.data[indexTrain].std(axis=(0,1,2))/255 # [0.24703223 0.24348513 0.26158784]
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(train_mean, train_std),
])
del CIFAR10_Train
CIFAR10_Train=datasets.CIFAR10( root=dataset_path, train=True, download=True, transform=transform_train)
trainSet=torch.utils.data.Subset(CIFAR10_Train,indexTrain)
validSet=torch.utils.data.Subset(CIFAR10_Train,indexValid)
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(trainSet,
batch_size=train_batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(validSet,
batch_size=test_batch_size, shuffle=False, **kwargs)
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(train_mean, train_std),
])
CIFAR10_test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=dataset_path, train=False, download=True,
transform=transform_test),
batch_size=test_batch_size, shuffle=False, **kwargs)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data))
global_loss=100000000.0
global_acc=0
global_model_path=''
def test(epoch, best_loss, best_epoch, lr,global_loss,global_acc,global_model_path):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
# sum up batch loss
test_loss += criterion(output, target).data
# get the index of the max log-probability
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
test_loss /= len(test_loader.dataset)
print(
'\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset), 100. * correct /
len(test_loader.dataset)))
if test_loss < best_loss:
best_epoch = epoch
best_loss = test_loss
torch.save(model, "best_"+str(lr)+".pt")
try:
if gsync_save:
gsync.update_file_to_folder("best_"+str(lr)+".pt")
except:
print('Failed to gsync_save.')
# Save the best model among three different lr
if test_loss<global_loss:
try:
model_save_name = 'best_'+str(lr)+'_'+str(np.where(model.baseModel))+'_'+str(np.where(model.modifiedModel))+'_'
path = F"/content/drive/My Drive/dl-reproducibility-project/model/{model_save_name}"
torch.save(model.state_dict(), path+'.epoch-{}.pt'.format(epoch))
except:
print('Failed to save best model to personal google drive')
global_acc=correct
global_loss=test_loss
try:
os.remove(global_model_path)
except:
print('Failed to delete the file')
global_model_path=global_model_path= path+'.epoch-{}.pt'.format(epoch)
return best_loss, best_epoch, correct,global_loss,global_acc,global_model_path
def evaluate():
model.eval()
prediction=[]
test_loss = 0
correct = 0
for data, target in CIFAR10_test_loader:
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
prediction+=[output]
# sum up batch loss
test_loss += criterion(output, target).data
# get the index of the max log-probability
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
test_loss /= len(CIFAR10_test_loader.dataset)
print(
'\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(CIFAR10_test_loader.dataset), 100. * correct /
len(CIFAR10_test_loader.dataset)))
return test_loss, correct, prediction
import os
try:
os.makedirs('/content/drive/My Drive/dl-reproducibility-project/model/')
except:
print('')
###Output
###Markdown
Model A without modification
###Code
for lr in [0.1,0.05,0.01]:
print("Training with ", lr)
try:
del model
except:
print('')
# Maybe it's redundant
try:
del criterion
del optimizer
del scheduler
except:
print('')
try:
torch.cuda.empty_cache()
except:
print('')
#
# Change Here
#
# Choose Model A by basemodel=[True, False, False] and choose B by basemodel=[False, True, False]
# Choose the modification by modifiedModel=[True, False, False, False]. The first one is the model without any modification.
# The second one is Strided, the third ConvPool, the last All-CNN.
model = Model()
if cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[200, 250, 300], gamma=0.1)
print(model)
summary(model.model, input_size=(3, 32, 32))
for epoch in range(350):
scheduler.step()
train(epoch)
best_loss, best_epoch, acc,global_loss,global_acc,global_model_path = test(epoch, best_loss, best_epoch, lr,global_loss,global_acc,global_model_path)
if (epoch>100)&(acc.item()>0)&((acc.item()/len(test_loader.dataset))<=0.1):
print("Stop at acc of test set:",acc.item()/len(test_loader.dataset))
break
try:
del model
except:
print('')
# Maybe it's redundant
try:
del criterion
del optimizer
del scheduler
except:
print('')
try:
torch.cuda.empty_cache()
except:
print('')
#
# Change Here
#
# Choose Model A by basemodel=[True, False, False] and choose B by basemodel=[False, True, False]
# Choose the modification by modifiedModel=[True, False, False, False]. The first one is the model without any modification.
# The second one is Strided, the third ConvPool, the last All-CNN.
model = Model()
model.load_state_dict(torch.load(global_model_path))
if cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
test_loss, correct, prediction=evaluate()
print('Validation Set')
print('Error: ',global_loss.item())
print('Acc',global_acc.item()/len(indexValid))
print('Model: ',global_model_path)
###Output
Validation Set
Error: 0.005021385848522186
Acc 0.7880788078807881
Model: /content/drive/My Drive/dl-reproducibility-project/model/best_0.01.epoch-178.pt
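###Markdown
The "Change Here" comments above switch between architectures through two flag lists. A sketch of that configuration (assuming `Model` accepts `baseModel` and `modifiedModel` keyword arguments, matching the `model.baseModel` / `model.modifiedModel` attributes referenced when saving):
###Code
# Hypothetical configuration example for the flags described in the comments above.
base_model = [True, False, False]             # Model A (rather than B or C)
modified_model = [False, True, False, False]  # Strided variant (vs. plain, ConvPool, All-CNN)
model = Model(baseModel=base_model, modifiedModel=modified_model)
###Output
_____no_output_____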
###Markdown
Strided-CNN-A
###Code
modifiedModel=[False, True, False, False]
for lr in [0.1,0.05,0.01]:
print("Training with ", lr)
try:
del model
except:
print('')
# Maybe it's redundant
try:
del criterion
del optimizer
del scheduler
except:
print('')
try:
torch.cuda.empty_cache()
except:
print('')
#
# Change Here
#
# Choose Model A by basemodel=[True, False, False] and choose B by basemodel=[False, True, False]
# Choose the modification by modifiedModel=[True, False, False, False]. The first one is the model without any modification.
# The second one is Strided, the third ConvPool, the last All-CNN.
model = Model(modifiedModel=modifiedModel)
if cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=[200, 250, 300], gamma=0.1)
print(model)
summary(model.model, input_size=(3, 32, 32))
for epoch in range(350):
scheduler.step()
train(epoch)
best_loss, best_epoch, acc,global_loss,global_acc,global_model_path = test(epoch, best_loss, best_epoch, lr,global_loss,global_acc,global_model_path)
if (epoch>100)&(acc.item()>0)&((acc.item()/len(test_loader.dataset))<=0.1):
print("Stop at acc of test set:",acc.item()/len(test_loader.dataset))
break
try:
del model
except:
print('')
# Maybe it's redundant
try:
del criterion
del optimizer
del scheduler
except:
print('')
try:
torch.cuda.empty_cache()
except:
print('')
#
# Change Here
#
# Choose Model A by basemodel=[True, False, False] and choose B by basemodel=[False, True, False]
# Choose the modification by modifiedModel=[True, False, False, False]. The first one is the model without any modification.
# The second one is Strided, the third ConvPool, the last All-CNN.
model = Model(modifiedModel=modifiedModel)
model.load_state_dict(torch.load(global_model_path))
if cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
test_loss, correct, prediction=evaluate()
print('Validation Set')
print('Error: ',global_loss.item())
print('Acc',global_acc.item()/len(indexValid))
print('Model: ',global_model_path)
###Output
Training with 0.1
Model(
(model): Sequential(
(0): Dropout(p=0.2, inplace=False)
(1): Conv2d(3, 96, kernel_size=(5, 5), stride=(2, 2), padding=(1, 1))
(2): ReLU()
(3): Dropout(p=0.5, inplace=False)
(4): Conv2d(96, 192, kernel_size=(5, 5), stride=(2, 2), padding=(1, 1))
(5): ReLU()
(6): Dropout(p=0.5, inplace=False)
(7): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU()
(9): Conv2d(192, 192, kernel_size=(1, 1), stride=(1, 1))
(10): ReLU()
(11): Conv2d(192, 10, kernel_size=(1, 1), stride=(1, 1))
(12): ReLU()
(13): AdaptiveAvgPool2d(output_size=1)
(14): Flatten()
)
(conv_3_192_192_1): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(conv_1_192_192): Conv2d(192, 192, kernel_size=(1, 1), stride=(1, 1))
(conv_1_192_class): Conv2d(192, 10, kernel_size=(1, 1), stride=(1, 1))
(BN_96): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(BN_192): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(BN_class): BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(maxP): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
(softMax): Softmax(dim=None)
(flatten): Flatten()
(dropOut_2): Dropout(p=0.2, inplace=False)
(dropOut_5): Dropout(p=0.5, inplace=False)
(relu): ReLU()
(avgPooling): AdaptiveAvgPool2d(output_size=1)
(conv_5_Input_96_1): Conv2d(3, 96, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))
(conv_5_96_192_1): Conv2d(96, 192, kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))
(conv_5_Input_96_2): Conv2d(3, 96, kernel_size=(5, 5), stride=(2, 2), padding=(1, 1))
(conv_5_96_192_2): Conv2d(96, 192, kernel_size=(5, 5), stride=(2, 2), padding=(1, 1))
)
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Dropout-1 [-1, 3, 32, 32] 0
Conv2d-2 [-1, 96, 15, 15] 7,296
ReLU-3 [-1, 96, 15, 15] 0
Dropout-4 [-1, 96, 15, 15] 0
Conv2d-5 [-1, 192, 7, 7] 460,992
ReLU-6 [-1, 192, 7, 7] 0
Dropout-7 [-1, 192, 7, 7] 0
Conv2d-8 [-1, 192, 7, 7] 331,968
ReLU-9 [-1, 192, 7, 7] 0
Conv2d-10 [-1, 192, 7, 7] 37,056
ReLU-11 [-1, 192, 7, 7] 0
Conv2d-12 [-1, 10, 7, 7] 1,930
ReLU-13 [-1, 10, 7, 7] 0
AdaptiveAvgPool2d-14 [-1, 10, 1, 1] 0
Flatten-15 [-1, 10] 0
================================================================
Total params: 839,242
Trainable params: 839,242
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.01
Forward/backward pass size (MB): 1.03
Params size (MB): 3.20
Estimated Total Size (MB): 4.24
----------------------------------------------------------------
|
HW1/IR Metrics.ipynb | ###Markdown
IR Metrics * Precision
###Code
import numpy as np
def precision(relevance: list):
"""
Computes the precision of a given array
:param relevance: a binary list
:return: float value of the precision of the given list
"""
l = np.array(relevance)
return l.sum()/len(l)
precision([0,1,0,1])
###Output
_____no_output_____
###Markdown
* Precision at K
###Code
# [1,0,1,0]
# l[:k].sum()/k
#
# k = 1 -> 100%
#
# k = 2 -> 50%
#
# k = 3 -> 66.6%
#
# k = 4 -> 50%
def precision_at_k(relevance: list, k: int):
"""
Computes the precision at k of a given array
:param k: the value of k
:param relevance: a binary list
:return: float value of the precision at k of the given list
"""
if k == 0:
return 0
l = np.array(relevance[:k]).sum()/k
return l
precision_at_k([0, 0, 0, 1], 1)
###Output
_____no_output_____
###Markdown
* Recall at K
###Code
def recall_at_k(relevance: list, nr_relevant: int, k: int):
"""
Computes the recall at k of a given array
:param k: the value of k
:param relevance: a binary list
:param nr_relevant: the total number of relevant documents for the query
:return: float value of the recall at k of the given list
"""
l = np.array(relevance[:k]).sum()/nr_relevant
return l
recall_at_k([0, 0, 0, 1], 4, 1)
###Output
_____no_output_____
###Markdown
* Average precision
###Code
def average_precision(relevance):
"""
Computes the average precision of a given list
Supposes that the input binary vector contains all relevant documents.
:param relevance: a binary list
:return: float value of the average precision of the given list
"""
length = len(relevance)
total = 0
for i in range(length):
if relevance[i]:
total += precision_at_k(relevance, i+1)
return total / np.array(relevance).sum()
average_precision([0, 1, 0, 1, 1, 1, 1])
###Output
_____no_output_____
###Markdown
* Mean average precision
###Code
def mean_avg_precision(l):
"""
Computes the MAP of a given list
:param l: an array of arrays, one for each of the queries
:return: float value of the MAP of the given list of lists
"""
mean = np.array([ average_precision(lista) for lista in l]).mean()
return mean
mean_avg_precision([[0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 1, 1], [0, 1, 0, 1, 1, 1, 1]])
# for reference: average_precision([0, 1, 0, 1, 1, 1, 1]) = 0.5961904761904762
###Output
_____no_output_____
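###Markdown
Before moving on to graded metrics, here is a quick combined check of the binary metrics defined so far, sweeping k over a single (hypothetical) ranked list:
###Code
relevance = [1, 0, 1, 1, 0]  # hypothetical binary judgments for one query
nr_relevant = 3              # assume all relevant documents appear in the list
for k in range(1, len(relevance) + 1):
    print(k, precision_at_k(relevance, k), recall_at_k(relevance, nr_relevant, k))
print("AP:", average_precision(relevance))
###Output
_____no_output_____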
###Markdown
* DCG at K
###Code
def dcg_at_k(relevance, k: int):
"""
Computes the dcg at k of a given list
:param k: the value of k
:param relevance: a list of (possibly graded) relevance scores
:return: float value of the dcg at k of the given list
"""
# Original DCG formulation: rel_1 + sum over i >= 2 of rel_i / log2(i);
# max(i, 2) avoids dividing by log2(1) = 0 at the first position.
total = 0
i = 0
for rel_i in relevance[: k]:
i += 1
total += rel_i/np.log2(max(i, 2))
return total
dcg_at_k([4, 4, 3, 0, 0, 1, 3, 3, 3, 0], 6)
###Output
_____no_output_____
###Markdown
* NDCG at K
###Code
def ndcg_at_k(relevance, k):
"""
Computes the normalized dcg at k of a given list
:param k: the value of k
:param relevance: a list of (possibly graded) relevance scores
:return: float value of the ndcg at k of the given list
"""
rel_sorted = sorted(relevance, reverse=True)
ideal = dcg_at_k(rel_sorted, k)  # DCG of the ideal (descending) ranking
real = dcg_at_k(relevance, k)
return real / ideal
ndcg_at_k([4, 4, 3, 0, 0, 1, 3, 3, 3, 0], 6)
###Output
_____no_output_____ |
JNotebook/The_Basics/Hypothesis_Testing/Hypothesis Testing.ipynb | ###Markdown
Statistical Hypothesis Testing Null and Alternate HypothesisStatistical **Hypothesis Testing** is making an assumption (hypothesis) and testing it with the test data to see if the assumption was correct or incorrect. Every hypothesis test, regardless of the data population and other parameters involved, requires the three steps below.* Making an initial assumption.* Collecting evidence (data).* Based on the available evidence (data), deciding whether to reject or not reject the initial assumption.The initial assumption made is called the **Null Hypothesis (H-0)** and the alternative (opposite) to the **Null Hypothesis** is called the **Alternate Hypothesis (H-A)**.Two widely used approaches to **hypothesis testing** are* Critical value approach* p-value approachThe **Critical value** approach involves comparing the observed test statistic to some cutoff value, called the **Critical Value**. If the test statistic is more extreme (i.e. more than the **Upper Critical Value** or less than the **Lower Critical Value**) than the **Critical Value**, then the null hypothesis is rejected in favor of the alternative hypothesis. If the test statistic is not as extreme as the critical value, then the null hypothesis is not rejected.The **p-value** approach involves determining the probability of observing a more extreme test statistic in the direction of the **Alternate Hypothesis**, assuming the null hypothesis were true. If the **p-value** is less than (or equal to) **α (the accepted level of p-value)**, then the null hypothesis **is rejected** in favor of the alternative hypothesis. If the p-value is greater than **α (the critical value)**, then the null hypothesis **is not rejected**. Z-Score and p-ValueIn this section we are just learning the definitions of **Z-Score** and **p-Value** and their inter-relations. In a subsequent section we will use the Z-Score and p-value along with the **Level of Confidence** or **Level of Significance** to test a hypothesis, i.e. either Reject the Null Hypothesis (the Alternate Hypothesis is accepted as the new norm) or Fail to Reject the Null Hypothesis (the Null Hypothesis remains valid).A **Z-Score** of a sample of data is a score that expresses the value of a distribution in standard deviations with respect to the mean. It shows how far (**how many Standard Deviations**) a specific value of data is from the sample **Mean**.The Z-Score is calculated by the formula**z = (X - X-bar)/Std-dev**where X = a Data Value, X-bar = Sample Mean, Std-dev = Standard Deviation of the sample.The **p-value** of a Data Value is the probability of obtaining a sample of data that is "more extreme" than the ones observed in your data, assuming the Null Hypothesis is true.The p-value of a z-score can be obtained from a Statistical Z-Table or using a Python Library function. Here we will use the Python Library function.**p-value = stats.norm.cdf(z-score)**However, depending on the data we are trying to test (in this case 53) compared to the currently known data (National Average = 60, Standard Deviation = 3) we may have to use a slightly different formula. To do that we need to learn the **Left Tail** and **Right Tail** tests. Left-Tail, Right-Tail and Two-Tail Tests of HypothesisIf the data we are trying to test (53) is **less than** the **Mean** (60) we use the **Left Tail Test**. 
If the data (say the class average was 63 as opposed to 53) is **greater than** the **Mean** (60), we use the **Right Tail Test**.For a **Right Tail Test** the formula for the p-value (again using a Python Library function) is**p-value = 1- stats.norm.cdf(z-score)*****p-value for a z-score can be looked up from the Statistical Z-Table*** An Example of Z-Score and p-valueAssume that we have the scores of a test in Business Analytics in a class of 100. The Mean of the sample (100 test scores) is 53. The National Average of the same test is 60 with a Standard Deviation of 3. We want to calculate the Z-score and p-value for this class sample (Average is 53) with respect to the National data (Average = 60, Standard Deviation = 3) to test our hypothesis "the class score is similar to the National Average".Here we will calculate the z-score and corresponding p-value for Case-1 where the **class average is 53** and Case-2 where the **class average is 63**
###Code
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import scipy.stats as stats
# Example of a Left Tail Test
print('========== Example of a Left Tail Test ============')
# Case-1 where class score mean = 53
print('Class score mean = ', 53)
# Calculating the z-score of 53 with respect to the National Score (Mean = 60, S-Dev = 3)
zscore1 = round((53 - 60)/3, 2)
print('Zscore for mean class score (53) = ', zscore1)
# Since 53 is less than the national average 60 we will do the Left Tail Test
prob1 = round(stats.norm.cdf(zscore1), 6)
print('p-value for the mean class score (53) = ', prob1)
# Example of a Right Tail Test
print('========== Example of a Right Tail Test ============')
# Case-2 where class score mean = 63
print('Class score mean = ', 63)
# Calculating the z-score of 63 with respect to the National Score (Mean = 60, S-Dev = 3)
zscore2 = round((63 - 60)/3, 2)
print('Zscore for mean class score (63) = ', zscore2)
# Since 63 is more than the national average 60 we will do the Right Tail Test
prob2 = round(1 - stats.norm.cdf(zscore2), 6)
print('p-value for the mean class score (63) = ', prob2)
###Output
========== Example of a Left Tail Test ============
Class score mean = 53
Zscore for mean class score (53) = -2.33
p-value for the mean class score (53) = 0.009903
========== Example of a Right Tail Test ============
Class score mean = 63
Zscore for mean class score (63) = 1.0
p-value for the mean class score (63) = 0.158655
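###Markdown
For comparison, here is a minimal sketch of the **Critical Value** approach on the same two cases (assuming a one-tailed test at Significance Level α = 0.05):
###Code
alpha = 0.05
z_critical = stats.norm.ppf(1 - alpha)  # one-tailed critical value, about 1.645
# Case-1 (Left Tail): reject H-0 if the z-score is below -z_critical
print('Case-1 reject H-0:', zscore1 < -z_critical)
# Case-2 (Right Tail): reject H-0 if the z-score is above +z_critical
print('Case-2 reject H-0:', zscore2 > z_critical)
###Output
_____no_output_____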
###Markdown
Level of Confidence and Level of SignificanceSince the results of a statistical test are not **definite proof** of the conclusion, the results are always associated with a **Level of Confidence** or a **Level of Significance**. Normally we would strive for a high **Level of Confidence** (equivalently, a low **Level of Significance**) when we are testing if a Null Hypothesis is true or the Alternate Hypothesis should replace the Null Hypothesis.Usually the **Levels of Confidence (C)** used are 95% (0.95), 99% (0.99) etc. for the conclusions of a hypothesis test to be considered **"reliable"**. The **Level of Significance** is the inverse of the Level of Confidence, i.e. **Level of Significance = 1 - Level of Confidence** or S = 1 - C. For a Level of Confidence of 99% (0.99) the Level of Significance is 0.01 and for a Level of Confidence of 95% (0.95), the Level of Significance is 0.05.In the majority of hypothesis tests a Level of Significance of 0.05 is used. This is called the **Critical Value α** against which the p-value (calculated in the previous step) is tested. If the p-value is **less than** the **Critical Value α**, the test results are considered **"highly significant"**; with **Critical Value α = 0.01**, by the same token, they are considered **"very highly significant"**. Hypothesis Testing Using Z-Score, p-Value and Level of SignificanceIn a hypothesis test using Z-Score and p-value, if the p-value is less than the **Critical Value α** (0.05 in our case), the test is considered statistically highly significant: the Alternate Hypothesis is accepted and the Null Hypothesis is rejected, and vice versa.In our test case-1 where the mean class score is 53, the p-value is 0.009903 which is less than the Critical Value α (0.05), so the Null Hypothesis, that the mean marks of the class are similar to the national average, is **Rejected**.In test case-2 where the mean class score is 63, the p-value is 0.158655 which is more than the Critical Value α (0.05), so the Null Hypothesis, that the mean marks of the class are similar to the national average, is **Accepted/Retained**.A Two-Tailed test can also be used in the above case using the same concepts of Z-Score, p-value and α, the Critical Significance Level. We will discuss Hypothesis Testing in more detail in the **Descriptive Analytics** section. Getting p-value from z-score and z-score from p-valueWe have already used **stats.norm.cdf(z-score)** to get the p-value from a z-score***p-value = stats.norm.cdf(z-score)***Now we will use stats.norm.ppf to get the z-score from a p-value***z-score = stats.norm.ppf(c-value), remembering, p-value = 1 - c-value***Let us calculate the z-score for the most commonly used **Confidence Levels (C)** of 90% (0.9), 95% (0.95), 98% (0.98) and 99% (0.99), i.e. the most commonly used **Significance Levels (S)** of 0.1, 0.05, 0.02 and 0.01 respectively
###Code
import scipy.stats as stats
from scipy.stats import norm
z_score_1 = stats.norm.ppf(0.9) # for C= 0.9 i.e. p = 0.1
print(z_score_1)
z_score_2 = stats.norm.ppf(0.95) # for C= 0.95 i.e. p = 0.05
print(z_score_2)
z_score_3 = stats.norm.ppf(0.98) # for C= 0.98 i.e. p = 0.02
print(z_score_3)
z_score_4 = stats.norm.ppf(0.99) # for C= 0.99 i.e. p = 0.01
print(z_score_4)
# For a 2-tail test the corresponding z-scores are (+-)1.645, 1.96, 2.33 and 2.575 respectively (calculated with α/2 on each tail)
print("===================================================================")
z_score_5 = stats.norm.ppf(0.95) # for C= 0.95 i.e. p = 0.05 on each tail
print(z_score_5)
z_score_6 = stats.norm.ppf(0.975) # for C= 0.975 i.e. p = 0.025 on each tail
print(z_score_6)
z_score_7 = stats.norm.ppf(0.99) # for C= 0.99 i.e. p = 0.01 on each tail
print(z_score_7)
z_score_8 = stats.norm.ppf(0.995) # for C= 0.995 i.e. p = 0.005 on each tail
print(z_score_8)
z_score_9 = stats.norm.ppf(0.900) # for C= 0.900 i.e. p = 0.1
print(z_score_9)
# Required sample size: n = z^2 * p * (1 - p) / E^2, where p is the assumed
# population proportion (0.5 is the most conservative choice, stored here in
# the variable std_dev) and E is the margin of error, taken as 1 - Confidence Level.
std_dev = 0.5
for x in [0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]:
z_score = stats.norm.ppf(x)
margin_of_error = round((1-x), 2)
sample_size = round(round((z_score**2) * (std_dev * (1- std_dev)), 4)/round((margin_of_error**2), 4), 2)
print('Confidence Level =', x, 'Margin of Error = ', margin_of_error, 'Z-Score = ', z_score, ' Standard Deviation = ', std_dev, 'Sample Size = ', sample_size)
###Output
Confidence Level = 0.9 Margin of Error = 0.1 Z-Score = 1.2815515655446004 Standard Deviation = 0.5 Sample Size = 41.06
Confidence Level = 0.91 Margin of Error = 0.09 Z-Score = 1.3407550336902165 Standard Deviation = 0.5 Sample Size = 55.48
Confidence Level = 0.92 Margin of Error = 0.08 Z-Score = 1.4050715603096329 Standard Deviation = 0.5 Sample Size = 77.12
Confidence Level = 0.93 Margin of Error = 0.07 Z-Score = 1.475791028179171 Standard Deviation = 0.5 Sample Size = 111.12
Confidence Level = 0.94 Margin of Error = 0.06 Z-Score = 1.5547735945968535 Standard Deviation = 0.5 Sample Size = 167.86
Confidence Level = 0.95 Margin of Error = 0.05 Z-Score = 1.6448536269514722 Standard Deviation = 0.5 Sample Size = 270.56
Confidence Level = 0.96 Margin of Error = 0.04 Z-Score = 1.7506860712521692 Standard Deviation = 0.5 Sample Size = 478.88
Confidence Level = 0.97 Margin of Error = 0.03 Z-Score = 1.8807936081512509 Standard Deviation = 0.5 Sample Size = 982.56
Confidence Level = 0.98 Margin of Error = 0.02 Z-Score = 2.0537489106318225 Standard Deviation = 0.5 Sample Size = 2636.25
Confidence Level = 0.99 Margin of Error = 0.01 Z-Score = 2.3263478740408408 Standard Deviation = 0.5 Sample Size = 13530.0
###Markdown
Example Scenarios of Different Types of Hypothesis Tests Example - 1*** A company has stated that they make a straw machine that makes straws that are 4 mm in diameter. A worker believes that the machine no longer makes straws of this size and samples 100 straws to perform a hypothesis test with 99% Confidence Level. Write the null and alternate hypothesis and any other related data.*** H-0: µ = 4 mm H-a: µ != 4 mm n = 100, C = 0.99, Critical Value α = 1 - C = 0.01 Example - 2*** Doctors believe that the average teen sleeps on average no longer than 10 hours per day. A researcher believes that teens sleep longer. Write the H-0 and H-a*** H-0: µ <= 10 H-a: µ > 10 Example - 3*** The school board claims that at least 60% of students bring a phone to school. A teacher believes this number is too high and randomly samples 25 students to test at a Significance Level of 0.02. Write the H-0, H-a and other related information*** H-0: p >= 0.60 H-a: p < 0.60 n = 25 Critical Value α = 0.02 C = 1 - α = 1 - 0.02 = 0.98 (98%) With the available information, it is possible to write the **null** and **alternate** hypotheses, but in these examples we do not have enough information to test them.Recall the steps of hypothesis tests outlined above* Write the hypotheses H-0 and H-a* Given µ and the standard deviation, calculate the z-score for the number to be tested using the formula z = (X-bar - µ)/Std-dev* Calculate the p-value using the python function p-value = stats.norm.cdf(z-score)* Given the Significance Level (the Critical Value α), or given the Confidence Level, calculate the Critical Value α = 1-C* For a **Left Tail Test** use the p-value as calculated* For a **Right Tail Test** use p-value = 1 - (calculated p-value)* For a **Two Tail Test** compare the calculated p-value with α/2* If the calculated p-value is **less** than the Critical Value α, **reject** the Null Hypothesis, else **fail to reject** the Null Hypothesis***Note: If H-a has <, it is a Left Tail Test, if H-a has >, it is a Right Tail Test, if H-a has != it is a 2-Tail Test***So, to be able to test the hypothesis we need to have x (the value to be tested), x-bar (sample mean), std-dev (sample standard deviation), and the required Confidence Level or the required Significance Level.In the next example we will go through these steps (assuming all the necessary information is given) Example - 4Records show that students on average score less than or equal to 850 on a test. A test prep company says that the students who take their course will score higher than this. To test, they sample 1000 students who score on an average of 856 with a standard deviation of 98 after taking the course. At 0.05 Significance Level, test the company claim. H-0: µ <= 850 H-a: µ > 850 n = 1000 x-bar = 856 std-dev = 98 α = 0.05 (C = 0.95 or 95%) Let's calculate the z-score and p-value to test the hypothesis. It is a **Right Tail Test**
###Code
import numpy as np
from scipy.stats import norm
x_bar = 856
µ = 850
s_dev = 98
z_score = (x_bar - µ)/s_dev
print("Z-score = ", z_score)
p_value = (1 - norm.cdf(z_score)) # since it is a Right Tail test
print("p-value = ", p_value)
###Output
Z-score = 0.061224489795918366
p-value = 0.4755902131389005
###Markdown
***Since the calculated p-value is greater than α (0.05) we fail to reject the null hypothesis, i.e. the company's claim is invalid or NOT Statistically Significant*** Example - 5A newspaper reports that the average age a woman gets married is 25 years or less. A researcher thinks that the average age is higher. He samples 213 women and gets an average of 25.4 years with a standard deviation of 2.3 years. With 95% Confidence Level, test the researcher's claim. H-0: µ <= 25 H-a: µ > 25 n = 213 x-bar = 25.4 s-dev = 2.3 C = 95% = 0.95 α = 0.05. Let's calculate the z-score and p-value to test the hypothesis. It is a **Right Tail Test**
###Code
import numpy as np
from scipy.stats import norm
x_bar = 25.4
µ = 25
s_dev = 2.3
z_score = (x_bar - µ)/s_dev
print("Z-score = ",z_score)
p_value = (1 - stats.norm.cdf(z_score)) # since it is a Right Tail test
print("p-value = ", p_value)
###Output
Z-score = 0.17391304347826025
p-value = 0.43096690081487876
###Markdown
***Since the calculated p-value is greater than α (0.05) we fail to reject the null hypothesis, i.e. the researcher's claim is invalid or NOT Statistically Significant*** Example - 6A study showed that on average women in a city had 1.48 kids. A researcher believes that the number is wrong. He surveys 128 women in the city and finds that on average these women had 1.39 kids with a standard deviation of 0.84 kids. At 90% Confidence Level, test the claim. H-0: µ = 1.48 H-a: µ != 1.48 n = 128 x-bar = 1.39 s-dev = 0.84 C = 90% = 0.9. Let's calculate the z-score and p-value to test the hypothesis. Since it is a **Two Tail Test**, the critical value is α/2 = (1 - C)/2 = 0.05
###Code
import numpy as np
from scipy.stats import norm
x_bar = 1.39
µ = 1.48
s_dev = 0.84
z_score = (x_bar - µ)/s_dev
print("Z-score = ", z_score)
p_value = stats.norm.cdf(z_score) # since it is a Two Tail test
print("p-value = ",p_value)
###Output
Z-score = -0.10714285714285725
p-value = 0.4573378238740764
###Markdown
***Since the calculated p-value is greater than α/2 (0.05) we fail to reject the null hypothesis, i.e. the researcher's claim is invalid or NOT Statistically Significant*** Example - 7The government says the average weight of males is 162.9 pounds or greater. A researcher thinks this is too high. He does a study of 39 males and gets an average weight of 160.1 pounds with a standard deviation of 1.6 pounds. At 0.05 Significance Level, test the claim. H-0: µ >= 162.9 H-a: µ < 162.9 n = 39 x-bar = 160.1 s-dev = 1.6 α = 0.05. Let's calculate the z-score and p-value to test the hypothesis. It is a **Left Tail Test**
###Code
import numpy as np
from scipy.stats import norm
x_bar = 160.1
µ = 162.9
s_dev = 1.6
z_score = (x_bar - µ)/s_dev
print("Z-score = ", z_score)
p_value = stats.norm.cdf(z_score) # since it is a Left Tail test
print("p-value = ",p_value)
###Output
Z-score = -1.750000000000007
p-value = 0.040059156863816475
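###Markdown
The steps used in Examples 4-7 can be collected into one helper (a sketch; `tail` selects the test type as described in the step list above):
###Code
def hypothesis_test(x_bar, mu, s_dev, alpha, tail):
    """Return (z_score, p_value, reject) for a left, right or two tailed z-test."""
    z_score = (x_bar - mu) / s_dev
    if tail == 'left':
        p_value = stats.norm.cdf(z_score)
        critical = alpha
    elif tail == 'right':
        p_value = 1 - stats.norm.cdf(z_score)
        critical = alpha
    else:  # two-tailed: compare the one-sided p-value with alpha/2
        p_value = stats.norm.cdf(z_score) if z_score < 0 else 1 - stats.norm.cdf(z_score)
        critical = alpha / 2
    return z_score, p_value, p_value < critical

# Example 7 again: reject is True because the p-value (0.0401) is below α (0.05)
print(hypothesis_test(160.1, 162.9, 1.6, 0.05, 'left'))
###Output
_____no_output_____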
|
2021/2021-01-31 Form der Aktivitätsdiagramme.ipynb | ###Markdown
What shape should the activity charts have? Circles
###Code
import math
from IPython.display import display, HTML
def create_progress_bars(bar_func, nr_actions, color):
if nr_actions == 0:
return ""
log_actions = math.log10(nr_actions)
paths = [bar_func(x=100, y=100, radius=20+20*i,
progress=min(1, log_actions - i), color=color)
for i in range(math.ceil(log_actions))]
return "\n".join(paths)
def create_activity_chart(bar_func, nr_actions, kind, color,
max_edits=10000, color_missing_actions="#eee"):
nr_actions = min(max_edits, nr_actions)
result = '<div style="width: 200px;">'
result += '<svg height="200" width="200">'
result += create_progress_bars(bar_func, max_edits, color_missing_actions)
result += create_progress_bars(bar_func, nr_actions, color)
result += """</svg>"""
result += '<p style="width: 100%%; text-align: center;"><b>%s %s</b></p>' % (nr_actions, kind)
result += '</div>'
return result
def display_example(bar_func):
html = '<div style="display: flex; flex-flow: row wrap;">'
for edits in [0, 4, 13, 35, 200, 345, 6789, 12000]:
html += create_activity_chart(bar_func, edits, "edits", "#4ca4d3")
html += '</div>'
display(HTML(html))
def polarToCartesian(centerX, centerY, radius, angleInDegrees):
# Convert polar coordinates (angle measured clockwise from 12 o'clock)
# to Cartesian (x, y) around the given center point.
angleInRadians = (angleInDegrees-90) * math.pi / 180.0
return ( centerX + (radius * math.cos(angleInRadians)),
centerY + (radius * math.sin(angleInRadians)))
def circle(x, y, radius, color, progress=1):
startAngle = 275
endAngle = 274 + 360*progress
start = polarToCartesian(x, y, radius, endAngle)
end = polarToCartesian(x, y, radius, startAngle)
largeArcFlag = "0" if endAngle - startAngle <= 180 else "1"
d = " ".join(map(str, [
"M", start[0], start[1],
"A", radius, radius, 0, largeArcFlag, 0, end[0], end[1]
]))
return """
<path d="%s" fill="none" stroke-linecap="round"
stroke="%s" stroke-width="%s" />
""" % (d, color, 13)
display_example(circle)
###Output
_____no_output_____
###Markdown
Triangles
###Code
def get_path_from_points(points, progress, color, width):
d = "M%s %s " % points[-1]
for i in range(len(points)):
point_progress = min(progress * len(points) - i, 1)
if point_progress >= 0:
next_point = (
points[i-1][0] + point_progress * (points[i][0]-points[i-1][0]),
points[i-1][1] + point_progress * (points[i][1]-points[i-1][1])
)
d += "L%s %s" % next_point
return """
<path d="%s" fill="none"
stroke-linejoin="round"
stroke-linecap="round"
stroke="%s" stroke-width="%s" />
""" % (d, color, width)
def triangle(x, y, radius, color, progress):
points = [
polarToCartesian(x, y, radius, 0),
polarToCartesian(x, y, radius, 120),
polarToCartesian(x, y, radius, 240)
]
return get_path_from_points(points, progress, color, 7.5)
display_example(triangle)
###Output
_____no_output_____
###Markdown
Squares (Variant 1)
###Code
def square1(x, y, radius, color, progress):
points = [
polarToCartesian(x, y, radius, -45),
polarToCartesian(x, y, radius, 45),
polarToCartesian(x, y, radius, 135),
polarToCartesian(x, y, radius, 135+90)
]
return get_path_from_points(points, progress, color, 9.5)
display_example(square1)
###Output
_____no_output_____
###Markdown
Squares (Variant 2)
###Code
def square2(x, y, radius, color, progress):
points = [
polarToCartesian(x, y, radius, 0),
polarToCartesian(x, y, radius, 90),
polarToCartesian(x, y, radius, 180),
polarToCartesian(x, y, radius, 270)
]
return get_path_from_points(points, progress, color, 9.5)
display_example(square2)
###Output
_____no_output_____
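###Markdown
The remaining parameters of `create_activity_chart` can be used to customize a single chart, for example (a usage sketch with the shapes defined above):
###Code
html = create_activity_chart(square2, 345, "edits", "#4ca4d3",
                             max_edits=1000, color_missing_actions="#ddd")
display(HTML(html))
###Output
_____no_output_____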
###Markdown
Further Possibilities
###Code
import numpy as np
from IPython.display import Markdown
def get_variant(nr_points, angle_offset=0):
angles = np.linspace(0, 360, nr_points+1) + 360/nr_points * math.ceil(nr_points/2) + angle_offset
def create_paths(x, y, radius, color, progress):
points = [polarToCartesian(x, y, radius, angle) for angle in angles]
return get_path_from_points(points, progress, color, 10)
return create_paths
for n in range(5,11):
if n%2 == 1:
display(Markdown("### %s-Eck" % n))
display_example(get_variant(n))
else:
display(Markdown("### %s-Eck (Variante 1)" % n))
display_example(get_variant(n))
display(Markdown("### %s-Eck (Variante 2)" % n))
display_example(get_variant(n, 360/n/2))
###Output
_____no_output_____ |
examples_and_tutorials/tutorials/how_to_build_a_simple_agent.ipynb | ###Markdown
[](https://colab.research.google.com/github/facebookresearch/fairo/blob/master/tutorials/how_to_build_a_simple_agent.ipynb) How to build your own agent Build a simple agentIn this tutorial, we will build a simple agent that catches a randomly moving bot in a 5x5 grid world. The goal is to understand the high level organization of the droidlet agent. Control logicThe basic droidlet agent is made up of four major components: a perceptual API, a memory system, a controller, and a task queue. In each iteration of the event loop, the agent will run perceptual modules, updating the memory system with what it perceives, maybe place tasks into the task queue, and lastly, pop all finished tasks and then step the highest priority task.A typical event loop is as follows: > **while** True **do**>> run [perceptual modules](https://facebookresearch.github.io/fairo/perception.html), update [memory](https://facebookresearch.github.io/fairo/memory.html)>>>> step [controller](https://facebookresearch.github.io/fairo/controller.html)>>>> step highest priority [task](https://facebookresearch.github.io/fairo/tasks.html)<!--- **Perception**Perception modules are where the agent perceives the world it resides in. Most of the perceptual modules in our example agents are visual: e.g. object detection and instance segmentation. You can customize your own perception modules and have them registered in the agent.All the information perception modules receive should go into the agent's memory system. **Memory System**The memory system serves as the interface for passing information between the various components of the agent. It consists of an AgentMemory object which is the entry point to the underlying SQL database and some MemoryNodes, each of which represents a particular entity or event. It stores and organizes information like: - player info- time info- program info- task info- etc. **Controller**The Controller is where the agent interprets commands, carries out dialogues and places tasks on the task stack. **Task queue**The task queue stores tasks, which are (mostly) self-contained lower-level world interactions (e.g. Move, Point). For each event loop, one task is popped off the task queue and executed by the agent.--> Extend BaseAgent---The first thing you need to do is extend the BaseAgent class and overwrite the following functions:
###Code
# grid_agent.py
class GridAgent(BaseAgent):
def __init__(self, world=None, opts=None):
self.world = world
self.pos = (0, 0, 0)
super(GridAgent, self).__init__(opts)
def init_memory(self):
pass
def init_perception(self):
pass
def init_controller(self):
pass
def perceive(self):
pass
def get_incoming_chats(self):
pass
def controller_step(self):
pass
def task_step(self, sleep_time=5):
pass
def handle_exception(self, e):
pass
def send_chat(self, chat):
pass
###Output
_____no_output_____
###Markdown
We will go over each component in the following sections. Create a simple 5x5 grid world---Note that in the above ```__init__``` function we are passing a world to GridAgent, which is a simulated 5x5(x1) gridworld which hosts our agent. We also put a simple bot named "target" in it; our agent will need to catch it.
###Code
# world.py
from collections import namedtuple

# Pos and Look come from droidlet's base utilities; they are redefined here as
# an assumption so that the snippet is self-contained.
Pos = namedtuple("Pos", "x, y, z")
Look = namedtuple("Look", "yaw, pitch")
Bot = namedtuple("Bot", "entityId, name, pos, look")
class World:
def __init__(self, opts=None, spec=None):
target = Bot(1977, "target", Pos(3, 4, 0), Look(0, 0))
self.bots = [target]
def get_bots(self, eid=None):
bots = self.bots if eid is None else [b for b in self.bots if b.entityId == eid]
return bots
def remove_bot(self, eid):
self.bots[:] = [b for b in self.bots if b.entityId != eid]
###Output
_____no_output_____
###Markdown
Heuristic Perception---In order to catch the target, our agent needs to keep track of its location. We add a heuristic perception module that gets the positions of all bots in the world and puts them into memory. In a more sophisticated agent, the perceptual models might be mediated by more in-depth heuristics or machine-learned models; but they would interface with the Memory system in a similar way.
###Code
# heuristic_perception.py
class HeuristicPerception:
def __init__(self, agent):
self.agent = agent
def perceive(self):
bots = self.agent.world.get_bots()
for bot in bots:
bot_node = self.agent.memory.get_player_by_eid(bot.entityId)
if bot_node is None:
memid = PlayerNode.create(self.agent.memory, bot)
bot_node = PlayerNode(self.agent.memory, memid)
self.agent.memory.tag(memid, "bot")
bot_node.update(self.agent.memory, bot, bot_node.memid)
###Output
_____no_output_____
###Markdown
Memory Module---To store and organize all the information, the agent needs a Memory Module. Here we just use [AgentMemory](https://facebookresearch.github.io/fairo/memory.htmlbase_agent.sql_memory.AgentMemory) of base_agent and use [PlayerNode](https://facebookresearch.github.io/fairo/memory.htmlmemorynodes) to represent the bot entity. You can also extend them and define your own Memory Nodes. Tasks---A [Task](https://facebookresearch.github.io/fairo/tasks.html) is a world interaction whose implementation might vary from platform to platform. **Simple Catch Task**We are going to create a simple Catch Task for our agent. We break it into two smaller subtasks: a Move Task and a Grab Task. In Move Task, the agent will simply head to a given position. The stop condition is when the agent is at the exact location of the target. It will move one block at a time to get close to the target until the stop condition is met.In Grab Task, the agent will simply grab the target physically. The stop condition is when the target has disappeared from the world.
###Code
# tasks.py
import numpy as np
class Move(Task):
def __init__(self, agent, task_data):
super(Move, self).__init__()
self.target = task_data["target"]
def step(self, agent):
super().step(agent)
if self.finished:
return
agent.move(self.target[0], self.target[1], self.target[2])
self.finished = True
class Grab(Task):
def __init__(self, agent, task_data):
super(Grab, self).__init__()
self.target_eid = task_data["target_eid"]
def step(self, agent):
super().step(agent)
if self.finished:
return
if len(agent.world.get_bots(eid=self.target_eid)) > 0:
agent.catch(self.target_eid)
else:
self.finished = True
class Catch(Task):
def __init__(self, agent, task_data):
super(Catch, self).__init__()
self.target_memid = task_data["target_memid"]
def step(self, agent):
super().step(agent)
if self.finished:
return
# retrieve target info from memory:
target_mem = agent.memory.get_mem_by_id(self.target_memid)
# first get close to the target, one block at a time
tx, ty, tz = target_mem.get_pos()
x, y, z = agent.get_pos()
if np.linalg.norm(np.subtract((x, y, z), (tx, ty, tz))) > 0.:
if x != tx:
x += 1 if x - tx < 0 else -1
else:
y += 1 if y - ty < 0 else -1
move_task = Move(agent, {"target": (x, y, z)})
agent.memory.add_tick()
self.add_child_task(move_task, agent)
return
# once target is within reach, catch it!
grab_task = Grab(agent, {"target_eid": target_mem.eid})
agent.memory.add_tick()
self.add_child_task(grab_task, agent)
self.finished = True
###Output
_____no_output_____
###Markdown
Controller---The [Controller](https://facebookresearch.github.io/fairo/controller.html) decides which Tasks (if any) to put on the stack. In the [craftassist](https://github.com/facebookresearch/fairo/blob/main/craftassist/agent/craftassist_agent.py) and [locobot](https://github.com/facebookresearch/fairo/blob/main/locobot/agent/locobot_agent.py) agents, the controller is itself a modular, multipart system. In this tutorial, to keep things simple and self contained, the controller will just push the Catch task onto the stack. For more in-depth discussion about Controllers we use, look [here](https://facebookresearch.github.io/fairo/controller.html)
###Code
# grid_agent.py
class GridAgent(BaseAgent):
...
...
def controller_step(self):
bot_memids = self.memory.get_memids_by_tag("bot")
if self.memory.task_stack_peek() is None:
if bot_memids:
task_data = {"target_memid": bot_memids[0]}
self.memory.task_stack_push(Catch(self, task_data))
else:
exit()
###Output
_____no_output_____
###Markdown
Task Step---Here the agent steps the topmost Task on the Stack.
###Code
# grid_agent.py
class GridAgent(BaseAgent):
...
...
def task_step(self, sleep_time=5):
# clear finished tasks from stack
while (
self.memory.task_stack_peek() and self.memory.task_stack_peek().task.check_finished()
):
self.memory.task_stack_pop()
# do nothing if there's no task
if self.memory.task_stack_peek() is None:
return
# If something to do, step the topmost task
task_mem = self.memory.task_stack_peek()
if task_mem.memid != self.last_task_memid:
self.last_task_memid = task_mem.memid
task_mem.task.step(self)
self.memory.task_stack_update_task(task_mem.memid, task_mem.task)
###Output
_____no_output_____
###Markdown
Put it together---
###Code
# grid_agent.py
import logging
import numpy as np
class GridAgent(BaseAgent):
def __init__(self, world=None, opts=None):
self.world = world
self.last_task_memid = None
self.pos = (0, 0, 0)
super(GridAgent, self).__init__(opts)
def init_memory(self):
self.memory = AgentMemory()
def init_perception(self):
self.perception_modules = {}
self.perception_modules['heuristic'] = HeuristicPerception(self)
def init_controller(self):
pass
def perceive(self):
self.world.step() # update world state
for perception_module in self.perception_modules.values():
perception_module.perceive()
def controller_step(self):
bot_memids = self.memory.get_memids_by_tag("bot")
if self.memory.task_stack_peek() is None:
if bot_memids:
task_data = {"target_memid": bot_memids[0]}
self.memory.task_stack_push(Catch(self, task_data))
logging.info(f"pushed Catch Task of bot with memid: {bot_memids[0]}")
else:
exit()
def task_step(self, sleep_time=5):
while (
self.memory.task_stack_peek() and self.memory.task_stack_peek().task.check_finished()
):
self.memory.task_stack_pop()
# do nothing if there's no task
if self.memory.task_stack_peek() is None:
return
# If something to do, step the topmost task
task_mem = self.memory.task_stack_peek()
if task_mem.memid != self.last_task_memid:
logging.info("Starting task {}".format(task_mem.task))
self.last_task_memid = task_mem.memid
task_mem.task.step(self)
self.memory.task_stack_update_task(task_mem.memid, task_mem.task)
self.world.visualize(self)
"""physical interfaces"""
def get_pos(self):
return self.pos
def move(self, x, y, z):
self.pos = (x, y, z)
return self.pos
def catch(self, target_eid):
bots = self.world.get_bots(eid=target_eid)
if len(bots) > 0:
bot = bots[0]
if np.linalg.norm(np.subtract(self.pos, bot.pos)) <1.0001:
self.world.remove_bot(target_eid)
###Output
_____no_output_____
###Markdown
Run the agentTo run the agent, you need a runtime populated with the files we just created. Luckily we have already prepared one for you. Simply run the following command to pull it and install the required packages.
###Code
!git clone https://github.com/facebookresearch/fairo.git && cd fairo/examples/grid && pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Run it now!
###Code
%run agent/grid_agent.py
###Output
_____no_output_____
###Markdown
[](https://colab.research.google.com/github/facebookresearch/droidlet/blob/master/tutorials/how_to_build_a_simple_agent.ipynb) How to build your own agent Build a simple agentIn this tutorial, we will build a simple agent that catches a randomly moving bot in a 5x5 grid world. The goal is to understand the high level organization of the droidlet agent. Control logicThe basic droidlet agent is made up of four major components: a perceptual API, a memory system, a controller, and a task queue. In each iteration of the event loop, the agent will run perceptual modules, updating the memory system with what it perceives, maybe place tasks into the task queue, and lastly, pop all finished tasks and then step the highest priority task.A typical event loop is as follows: > **while** True **do**>> run [perceptual modules](https://facebookresearch.github.io/droidlet/perception.html), update [memory](https://facebookresearch.github.io/droidlet/memory.html)>>>> step [controller](https://facebookresearch.github.io/droidlet/controller.html)>>>> step highest priority [task](https://facebookresearch.github.io/droidlet/tasks.html)<!--- **Perception**Perception modules is where the agent perceives the world it resides. Most of the perceptual modules in our example agents are visual: e.g. object detection and instance segmentation. You can customize your own perception modules and have it registered in the agent.All the information perception modules receive should go into agent's memory system. **Memory System**Memory system serves as the interface for passing information between the various components of the agent. It consists of an AgentMemory object which is the entry point to the underlying SQL database and some MemoryNodes which represents a particular entity or event. It stores and organizes information like: - player info- time info- program info- task info- etc. **Controller**Controller is where agent interpret commands, carry out dialogues and place tasks on the task stack. **Task queue**Task queue stores tasks, which are (mostly) self-contained lower-level world interactions (e.g. Move, Point). For each event loop, one task is poped out of task queue and got executed by the agent.--> Extend BaseAgent---The first you need to do is to extend the BaseAgent class and overwrite the following functions:
###Code
# grid_agent.py
class GridAgent(BaseAgent):
def __init__(self, world=None, opts=None):
self.world = world
self.pos = (0, 0, 0)
super(GridAgent, self).__init__(opts)
def init_memory(self):
pass
def init_perception(self):
pass
def init_controller(self):
pass
def perceive(self):
pass
def get_incoming_chats(self):
pass
def controller_step(self):
pass
def task_step(self, sleep_time=5):
pass
def handle_exception(self, e):
pass
def send_chat(self, chat):
pass
###Output
_____no_output_____
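###Markdown
These overrides are driven by an event loop owned by BaseAgent. Below is a hypothetical sketch of that loop, matching the **while** True pseudocode above; the real droidlet implementation differs in details (error handling, chat handling, tick bookkeeping).
###Code
# Hypothetical sketch of the BaseAgent event loop described above.
def run_event_loop(agent, max_steps=None):
    steps = 0
    while max_steps is None or steps < max_steps:
        agent.perceive()         # run perceptual modules, update memory
        agent.controller_step()  # maybe push new tasks onto the task stack
        agent.task_step()        # step the highest-priority task
        steps += 1
###Output
_____no_output_____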
###Markdown
We will go over each component in the following sections. Create a simple 5x5 grid world---Note that in the above ```__init__``` function we are passing a world to GridAgent, which is a simulated 5x5(x1) grid world that hosts our agent. We also put a simple bot named "target" in it; our agent will need to catch it.
###Code
# world.py
from collections import namedtuple
# Pos and Look are simple coordinate/orientation namedtuples provided by the
# droidlet codebase, e.g. Pos(x, y, z) and Look(yaw, pitch).
Bot = namedtuple("Bot", "entityId, name, pos, look")
class World:
def __init__(self, opts=None, spec=None):
target = Bot(1977, "target", Pos(3, 4, 0), Look(0, 0))
self.bots = [target]
def get_bots(self, eid=None):
bots = self.bots if eid is None else [b for b in self.bots if b.entityId == eid]
return bots
def remove_bot(self, eid):
self.bots[:] = [b for b in self.bots if b.entityId != eid]
###Output
_____no_output_____
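###Markdown
Later on, the agent calls ```world.step()``` to advance the simulation. A hypothetical sketch of such a step method, moving each bot one random step clamped to the 5x5 grid, is shown below; the prepared runtime ships its own implementation, which may differ.
###Code
import random
from collections import namedtuple
# Hypothetical World.step() sketch. Pos here mirrors the (x, y, z) namedtuple used above.
Pos = namedtuple("Pos", "x, y, z")
def world_step(world, grid_size=5):
    moved = []
    for bot in world.bots:
        dx, dy = random.choice([(1, 0), (-1, 0), (0, 1), (0, -1), (0, 0)])
        x = min(max(bot.pos.x + dx, 0), grid_size - 1)
        y = min(max(bot.pos.y + dy, 0), grid_size - 1)
        moved.append(bot._replace(pos=Pos(x, y, 0)))
    world.bots = moved
###Output
_____no_output_____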
###Markdown
Heuristic Perception---In order to catch the target, our agent needs to keep track of its location. We add a heuristic perception module that gets the positions of all bots in the world and puts them into memory. In a more sophisticated agent, the perceptual models might be mediated by more in-depth heuristics or machine-learned models, but they would interface with the memory system in a similar way.
###Code
# heuristic_perception.py
class HeuristicPerception:
def __init__(self, agent):
self.agent = agent
def perceive(self):
bots = self.agent.world.get_bots()
for bot in bots:
bot_node = self.agent.memory.get_player_by_eid(bot.entityId)
if bot_node is None:
memid = PlayerNode.create(self.agent.memory, bot)
bot_node = PlayerNode(self.agent.memory, memid)
self.agent.memory.tag(memid, "bot")
bot_node.update(self.agent.memory, bot, bot_node.memid)
###Output
_____no_output_____
###Markdown
Memory Module---To store and organize all the information, the agent needs a Memory Module. Here we just use the [AgentMemory](https://facebookresearch.github.io/droidlet/memory.html#base_agent.sql_memory.AgentMemory) of base_agent, and use a [PlayerNode](https://facebookresearch.github.io/droidlet/memory.html#memorynodes) to represent the bot entity. You can also extend them and define your own Memory Nodes. Tasks---A [Task](https://facebookresearch.github.io/droidlet/tasks.html) is a world interaction whose implementation might vary from platform to platform. **Simple Catch Task** We are going to create a simple Catch Task for our agent. We break it into two smaller subtasks: a Move Task and a Grab Task. In the Move Task, the agent simply heads to a given position; the stop condition is when the agent is at the exact location of the target. It will move one block at a time to get close to the target until the stop condition is met. In the Grab Task, the agent simply grabs the target physically; the stop condition is when the target has disappeared from the world.
###Code
# tasks.py
import numpy as np
# Task is the base task class provided by the droidlet codebase.
class Move(Task):
def __init__(self, agent, task_data):
super(Move, self).__init__()
self.target = task_data["target"]
def step(self, agent):
super().step(agent)
if self.finished:
return
agent.move(self.target[0], self.target[1], self.target[2])
self.finished = True
class Grab(Task):
def __init__(self, agent, task_data):
super(Grab, self).__init__()
self.target_eid = task_data["target_eid"]
def step(self, agent):
super().step(agent)
if self.finished:
return
if len(agent.world.get_bots(eid=self.target_eid)) > 0:
agent.catch(self.target_eid)
else:
self.finished = True
class Catch(Task):
def __init__(self, agent, task_data):
super(Catch, self).__init__()
self.target_memid = task_data["target_memid"]
def step(self, agent):
super().step(agent)
if self.finished:
return
# retrieve target info from memory:
target_mem = agent.memory.get_mem_by_id(self.target_memid)
# first get close to the target, one block at a time
tx, ty, tz = target_mem.get_pos()
x, y, z = agent.get_pos()
if np.linalg.norm(np.subtract((x, y, z), (tx, ty, tz))) > 0.:
if x != tx:
x += 1 if x - tx < 0 else -1
else:
y += 1 if y - ty < 0 else -1
move_task = Move(agent, {"target": (x, y, z)})
agent.memory.add_tick()
self.add_child_task(move_task, agent)
return
# once target is within reach, catch it!
grab_task = Grab(agent, {"target_eid": target_mem.eid})
agent.memory.add_tick()
self.add_child_task(grab_task, agent)
self.finished = True
###Output
_____no_output_____
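###Markdown
To make the chase rule in ```Catch.step``` concrete, here is a small standalone illustration (plain Python, independent of the agent classes): starting from (0, 0), the agent closes the x gap one block at a time, then the y gap, until it reaches the target at (3, 4).
###Code
import numpy as np
# Standalone illustration of the one-block-at-a-time chase rule from Catch.step.
def next_pos(pos, target):
    (x, y), (tx, ty) = pos, target
    if np.linalg.norm(np.subtract(pos, target)) > 0.:
        if x != tx:
            x += 1 if x - tx < 0 else -1
        else:
            y += 1 if y - ty < 0 else -1
    return (x, y)
pos, target = (0, 0), (3, 4)
path = [pos]
while pos != target:
    pos = next_pos(pos, target)
    path.append(pos)
# path is now [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (3, 2), (3, 3), (3, 4)]
###Output
_____no_output_____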
###Markdown
Controller---The [Controller](https://facebookresearch.github.io/droidlet/controller.html) decides which Tasks (if any) to put on the stack. In the [craftassist](https://github.com/facebookresearch/droidlet/blob/main/craftassist/agent/craftassist_agent.py) and [locobot](https://github.com/facebookresearch/droidlet/blob/main/locobot/agent/locobot_agent.py) agents, the controller is itself a modular, multipart system. In this tutorial, to keep things simple and self-contained, the controller will just push the Catch task onto the stack. For a more in-depth discussion of the Controllers we use, look [here](https://facebookresearch.github.io/droidlet/controller.html).
###Code
# grid_agent.py
class GridAgent(BaseAgent):
...
...
def controller_step(self):
bot_memids = self.memory.get_memids_by_tag("bot")
if self.memory.task_stack_peek() is None:
if bot_memids:
task_data = {"target_memid": bot_memids[0]}
self.memory.task_stack_push(Catch(self, task_data))
else:
exit()
###Output
_____no_output_____
###Markdown
Task Step---Here the agent steps the topmost Task on the Stack.
###Code
# grid_agent.py
class GridAgent(BaseAgent):
...
...
def task_step(self, sleep_time=5):
# clear finished tasks from the stack
while (
self.memory.task_stack_peek() and self.memory.task_stack_peek().task.check_finished()
):
self.memory.task_stack_pop()
# do nothing if there's no task
if self.memory.task_stack_peek() is None:
return
# If something to do, step the topmost task
task_mem = self.memory.task_stack_peek()
if task_mem.memid != self.last_task_memid:
self.last_task_memid = task_mem.memid
task_mem.task.step(self)
self.memory.task_stack_update_task(task_mem.memid, task_mem.task)
###Output
_____no_output_____
###Markdown
Put it together---
###Code
# grid_agent.py
import logging
import numpy as np
class GridAgent(BaseAgent):
def __init__(self, world=None, opts=None):
self.world = world
self.last_task_memid = None
self.pos = (0, 0, 0)
super(GridAgent, self).__init__(opts)
def init_memory(self):
self.memory = AgentMemory()
def init_perception(self):
self.perception_modules = {}
self.perception_modules['heuristic'] = HeuristicPerception(self)
def init_controller(self):
pass
def perceive(self):
self.world.step() # update world state
for perception_module in self.perception_modules.values():
perception_module.perceive()
def controller_step(self):
bot_memids = self.memory.get_memids_by_tag("bot")
if self.memory.task_stack_peek() is None:
if bot_memids:
task_data = {"target_memid": bot_memids[0]}
self.memory.task_stack_push(Catch(self, task_data))
logging.info(f"pushed Catch Task of bot with memid: {bot_memids[0]}")
else:
exit()
def task_step(self, sleep_time=5):
while (
self.memory.task_stack_peek() and self.memory.task_stack_peek().task.check_finished()
):
self.memory.task_stack_pop()
# do nothing if there's no task
if self.memory.task_stack_peek() is None:
return
# If something to do, step the topmost task
task_mem = self.memory.task_stack_peek()
if task_mem.memid != self.last_task_memid:
logging.info("Starting task {}".format(task_mem.task))
self.last_task_memid = task_mem.memid
task_mem.task.step(self)
self.memory.task_stack_update_task(task_mem.memid, task_mem.task)
self.world.visualize(self)
"""physical interfaces"""
def get_pos(self):
return self.pos
def move(self, x, y, z):
self.pos = (x, y, z)
return self.pos
def catch(self, target_eid):
bots = self.world.get_bots(eid=target_eid)
if len(bots) > 0:
bot = bots[0]
if np.linalg.norm(np.subtract(self.pos, bot.pos)) < 1.0001:
self.world.remove_bot(target_eid)
###Output
_____no_output_____
###Markdown
Run the agent. To run the agent, you need to create a runtime populated with the files we just created. Luckily, we have already prepared one for you. Simply run the following command to pull it and install the required packages.
###Code
!git clone https://github.com/facebookresearch/droidlet.git && cd droidlet/examples/grid && pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Run it now!
###Code
%run agent/grid_agent.py
###Output
_____no_output_____ |
module08/kevin/HW08_KevinEgedy.ipynb | ###Markdown
Recommendation System. In this lab, we will use a Python package named [Surprise](http://surpriselib.com/), which is an easy-to-use Python scikit for recommendation systems. It includes several commonly used algorithms, including [collaborative filtering](https://surprise.readthedocs.io/en/stable/knn_inspired.html) and [Matrix Factorization-based algorithms](https://surprise.readthedocs.io/en/stable/matrix_factorization.html).
###Code
# # install packages
# import sys
#!pip3 install scikit-surprise
from surprise.prediction_algorithms.matrix_factorization import SVD
from surprise.prediction_algorithms.knns import KNNBasic
from surprise.prediction_algorithms.knns import KNNWithMeans
from surprise.prediction_algorithms.knns import KNNBaseline
from surprise import Dataset
from surprise import accuracy
from surprise.model_selection import cross_validate
from surprise.model_selection import train_test_split
from surprise.model_selection import GridSearchCV
###Output
_____no_output_____
###Markdown
----- Load data from the surprise package. First, we can download the ml-100k dataset included in the surprise package. The data will be saved in the .surprise_data folder in your home directory. Use the package API to sample a random trainset and testset, where the test set is made of 20% of the ratings.
###Code
# Load the movielens-100k dataset (download it if needed) and split the data into train and test sets.
data = Dataset.load_builtin('ml-100k')
# sample random trainset and testset where test set is made of 20% of the ratings.
trainset, testset = train_test_split(data, test_size=0.20)
print("Number of users: {}".format(trainset.n_users))
print("Number of items: {}".format(trainset.n_items))
print("Number of ratings: {}".format(trainset.n_ratings))
###Output
Number of users: 943
Number of items: 1649
Number of ratings: 80000
###Markdown
----- Collaborative Filtering. First, we will apply three different flavors of collaborative filtering to this data and evaluate their performances using RMSE and MAE. For each of these algorithms, the actual number of neighbors that are aggregated to compute an estimation is necessarily less than or equal to `k`. The basic collaborative filtering algorithm. **TODO**: You will study the [KNNBasic](https://surprise.readthedocs.io/en/stable/knn_inspired.html) API, choose the number of neighbors and the similarity measure, train the model on the training dataset, and make predictions on the test dataset. Finally, you will evaluate the model performance based on RMSE and MAE. Try playing around with different numbers of neighbors and different similarity measures to see how they impact model performance.
###Code
# Use the basic collaborative filtering algorithm.
# See https://surprise.readthedocs.io/en/stable/knn_inspired.html for more details.
# TODO
# Reference: https://realpython.com/build-recommendation-engine-collaborative-filtering/
sim_options = {
"name": ["msd", "cosine", "pearson", "pearson_baseline"],
"user_based": [False],
}
param_grid = {"sim_options":sim_options}
knnbasic = GridSearchCV(KNNBasic, param_grid, measures=["rmse", "mae"],refit=True)
knnbasic.fit(data)
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(knnbasic.best_score)
pp.pprint(knnbasic.best_params)
knnbasic.predict('A', 1)
###Output
_____no_output_____
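###Markdown
Alternatively, following the TODO text literally, you can train a single KNNBasic model on the train split and score it on the held-out test split. Below is a minimal sketch; the k value and similarity measure are arbitrary example choices to experiment with.
###Code
# Minimal train/test evaluation sketch for KNNBasic.
knn = KNNBasic(k=40, sim_options={"name": "cosine", "user_based": True})
knn.fit(trainset)
predictions = knn.test(testset)
accuracy.rmse(predictions)
accuracy.mae(predictions)
###Output
_____no_output_____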
###Markdown
The basic collaborative filtering algorithm with user mean ratings. **TODO**: A variation of the basic CF model is to take into account the mean ratings of each user. You will study the [KNNWithMeans](https://surprise.readthedocs.io/en/stable/knn_inspired.html) API, choose the number of neighbors and the similarity measure, train the model on the training dataset, and make predictions on the test dataset. Finally, you will evaluate the model performance based on RMSE and MAE. Try playing around with different numbers of neighbors and different similarity measures to see how they impact model performance.
###Code
# Use the basic collaborative filtering algorithm, taking into account the mean ratings of each user.
# See https://surprise.readthedocs.io/en/stable/knn_inspired.html for more details.
# TODO
sim_options = {
"name": ["msd", "cosine", "pearson", "pearson_baseline"],
"user_based": [True],
"min_support": [3, 4, 5],
}
param_grid = {"sim_options":sim_options}
knnmeans = GridSearchCV(KNNWithMeans, param_grid, measures=["rmse", "mae"],refit=True)
knnmeans.fit(data)
pp.pprint(knnmeans.best_score)
pp.pprint(knnmeans.best_params)
knnmeans.predict('A', 1)
###Output
_____no_output_____
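###Markdown
KNNBaseline (imported above but not used yet) extends this idea further by taking baseline estimates into account rather than plain means. A quick cross-validated check, as a sketch:
###Code
# Cross-validated check of KNNBaseline; the surprise docs suggest the
# pearson_baseline similarity for this algorithm.
knnbaseline = KNNBaseline(sim_options={"name": "pearson_baseline", "user_based": True})
cross_validate(knnbaseline, data, measures=["RMSE", "MAE"], cv=5, verbose=True)
###Output
_____no_output_____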
###Markdown
----- Matrix Factorization. Then, we will explore matrix factorization techniques for recommendation. Matrix factorization algorithms work by decomposing the user-item interaction matrix into the product of two lower-dimensionality rectangular matrices. The famous SVD algorithm for matrix factorization was popularized by Simon Funk during the Netflix Prize. **TODO**: in this task, you will use the famous SVD algorithm to implement the matrix factorization model. You will study the [SVD](https://surprise.readthedocs.io/en/stable/matrix_factorization.html) API, choose the number of factors and other hyperparameters, train the model on the training dataset, and make predictions on the test dataset. Finally, you will evaluate the model performance based on RMSE and MAE. Try playing around with different numbers of factors, and also try the [SVD++ algorithm](https://surprise.readthedocs.io/en/stable/matrix_factorization.html) and [Non-negative Matrix Factorization](https://surprise.readthedocs.io/en/stable/matrix_factorization.html) to see if you can improve the model performance.
###Code
# We'll use the famous SVD algorithm.
# TODO
param_grid = {
"n_epochs": [5, 10],
"lr_all": [0.002, 0.005],
"reg_all": [0.4, 0.6]
}
svd = GridSearchCV(SVD, param_grid, measures=["rmse", "mae"],refit=True)
svd.fit(data)
pp.pprint(svd.best_score)
pp.pprint(svd.best_params)
svd.predict('A', 1)
###Output
_____no_output_____
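###Markdown
To try the SVD++ and NMF variants mentioned above, here is a quick sketch using the package's cross-validation helper (both classes live in surprise.prediction_algorithms.matrix_factorization; note that SVD++ is considerably slower to train than SVD):
###Code
from surprise.prediction_algorithms.matrix_factorization import SVDpp, NMF
# Compare SVD++ and NMF with 5-fold cross-validation on the same data.
for algo in (SVDpp(), NMF()):
    cross_validate(algo, data, measures=["RMSE", "MAE"], cv=5, verbose=True)
###Output
_____no_output_____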
###Markdown
[BONUS] Implement your own version of User-User or Item-Item Collaborative Filtering and compare its performance against the surprise package's implementation.
###Code
# TODO
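# A minimal user-user collaborative filtering sketch (assumes `trainset` and
# `testset` from the earlier split are still in scope). It mean-centers each
# user's ratings, computes user-user cosine similarity, and predicts a rating
# as the similarity-weighted average of the k nearest neighbors' centered
# ratings, added back onto the user's mean.
import numpy as np
R = np.zeros((trainset.n_users, trainset.n_items))
mask = np.zeros_like(R, dtype=bool)
for u, i, r in trainset.all_ratings():
    R[u, i] = r
    mask[u, i] = True
counts = mask.sum(axis=1)
user_means = np.where(counts > 0, R.sum(axis=1) / np.maximum(counts, 1), trainset.global_mean)
C = np.where(mask, R - user_means[:, None], 0.0)  # mean-centered ratings
norms = np.linalg.norm(C, axis=1)
sim = (C @ C.T) / np.maximum(np.outer(norms, norms), 1e-9)  # cosine similarity
def predict_uu(raw_uid, raw_iid, k=40):
    try:
        u = trainset.to_inner_uid(raw_uid)
        i = trainset.to_inner_iid(raw_iid)
    except ValueError:  # unseen user or item: fall back to the global mean
        return trainset.global_mean
    rated = np.where(mask[:, i])[0]  # users who rated item i
    if len(rated) == 0:
        return user_means[u]
    neighbors = rated[np.argsort(-sim[u, rated])][:k]
    w = sim[u, neighbors]
    if np.abs(w).sum() < 1e-9:
        return user_means[u]
    return user_means[u] + (w @ C[neighbors, i]) / np.abs(w).sum()
preds = np.array([predict_uu(uid, iid) for (uid, iid, _) in testset])
truth = np.array([r for (_, _, r) in testset])
print("User-user CF RMSE:", np.sqrt(np.mean((preds - truth) ** 2)))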
###Output
_____no_output_____ |
03-entrance_exam/Applying_Rules_MLXtend.ipynb | ###Markdown
In this notebook I am going to use other features of Apriori to mine rules
###Code
import pandas as pd
ready = pd.read_csv('source/ready_data.csv')
transactions = []
for i in range(0, len(ready)):
transactions.append([str(ready.values[i, j]) for j in range(0, len(ready.columns))])
len(transactions)
from mlxtend.preprocessing import TransactionEncoder
te = TransactionEncoder()
te_ary = te.fit(transactions).transform(transactions)
df = pd.DataFrame(te_ary, columns=te.columns_)
df
from mlxtend.frequent_patterns import apriori
rules = apriori(df, min_support=0.3, use_colnames=True)
rules
itemsets_bigger_than_1 = apriori(df, min_support=0.5, use_colnames=True)
itemsets_bigger_than_1['length'] = itemsets_bigger_than_1['itemsets'].apply(lambda x: len(x))
itemsets_bigger_than_1 = itemsets_bigger_than_1[itemsets_bigger_than_1['length'] > 1]
itemsets_bigger_than_1
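# The frames above contain frequent itemsets; to mine actual association
# rules from them, mlxtend also provides association_rules. For example,
# keep rules with confidence >= 0.7 and inspect the highest-lift ones:
from mlxtend.frequent_patterns import association_rules
assoc_rules = association_rules(rules, metric="confidence", min_threshold=0.7)
assoc_rules.sort_values("lift", ascending=False).head()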
###Output
_____no_output_____ |
Courses/TensorFlow in Practice/Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning/Week4/utf-8''Exercise4-Question.ipynb | ###Markdown
Below is code with a link to a happy or sad dataset which contains 80 images, 40 happy and 40 sad. Create a convolutional neural network that trains to 100% accuracy on these images, and which cancels training upon hitting a training accuracy of >.999. Hint: it will work best with 3 convolutional layers.
###Code
import tensorflow as tf
import os
import zipfile
from os import path, getcwd, chdir
# DO NOT CHANGE THE LINE BELOW. If you are developing in a local
# environment, then grab happy-or-sad.zip from the Coursera Jupyter Notebook
# and place it inside a local folder and edit the path to that location
path = f"{getcwd()}/../tmp2/happy-or-sad.zip"
zip_ref = zipfile.ZipFile(path, 'r')
zip_ref.extractall("/tmp/h-or-s")
zip_ref.close()
# GRADED FUNCTION: train_happy_sad_model
def train_happy_sad_model():
# Please write your code only where you are indicated.
# please do not remove # model fitting inline comments.
DESIRED_ACCURACY = 0.999
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('acc')>=DESIRED_ACCURACY):
print("\nReached 99.8% accuracy so cancelling training!")
self.model.stop_training = True
callbacks = myCallback()
# This Code Block should Define and Compile the Model. Please assume the images are 150 X 150 in your implementation.
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
# The second convolution
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The third convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fourth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# The fifth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.001),
metrics=['acc'])
# This code block should create an instance of an ImageDataGenerator called train_datagen
# And a train_generator by calling train_datagen.flow_from_directory
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1/255)
# Please use a target_size of 150 X 150.
train_generator = train_datagen.flow_from_directory(
'/tmp/h-or-s/', # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=128,
class_mode='binary')
# Expected output: 'Found 80 images belonging to 2 classes'
# This code block should call model.fit_generator and train for
# a number of epochs.
# model fitting
history = model.fit_generator(
train_generator,
steps_per_epoch=8,
epochs=15,
callbacks = [callbacks])
# model fitting
return history.history['acc'][-1]
# The Expected output: "Reached 99.9% accuracy so cancelling training!""
train_happy_sad_model()
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
%%javascript
<!-- Save the notebook -->
IPython.notebook.save_checkpoint();
%%javascript
<!-- Shutdown and close the notebook -->
window.onbeforeunload = null
window.close();
IPython.notebook.session.delete();
###Output
_____no_output_____ |
_notebooks/2021-11-17-A-Flight-Through-a-Terra-Wormhole.ipynb | ###Markdown
A Flight Through a Terra Wormhole> There's a new bridge in town- toc:true- branch: master- badges: true- comments: false- author: Scott Simpson- categories: [Terra, Wormhole]- hide: false
###Code
#hide
#Imports & settings
!pip install plotly --upgrade
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
%matplotlib inline
%load_ext google.colab.data_table
%load_ext rpy2.ipython
%R options(tidyverse.quiet = TRUE)
%R options(lubridate.quiet = TRUE)
%R options(jsonlite.quiet = TRUE)
%R suppressMessages(library(tidyverse))
%R suppressMessages(library(lubridate))
%R suppressMessages(library(jsonlite))
%R suppressMessages(options(dplyr.summarise.inform = FALSE))
#hide
%%R
#Grab wormhole bridge query from Flipside
df_wh = fromJSON('https://api.flipsidecrypto.com/api/v2/queries/355ce3fd-4e1a-4043-89e7-eddbdbff3aa7/data/latest', simplifyDataFrame = TRUE)
#fix the column names
names(df_wh)<-tolower(names(df_wh))
#Change the date to date format
df_wh$block_timestamp <- parse_datetime(df_wh$block_timestamp)
#Grab eth shuttle bridge query from Flipside
df_sh_eth = fromJSON('https://api.flipsidecrypto.com/api/v2/queries/97d5a217-63e2-465b-a2a0-b7013678678b/data/latest', simplifyDataFrame = TRUE)
#Grab bsc receive shuttle bridge query from Flipside
df_sh_bsc_r = fromJSON('https://api.flipsidecrypto.com/api/v2/queries/17027191-c807-4709-b8a2-66b12e7e0d74/data/latest', simplifyDataFrame = TRUE)
#Grab bsc send shuttle bridge query from Flipside
df_sh_bsc_s = fromJSON('https://api.flipsidecrypto.com/api/v2/queries/10fdf58b-4e5c-4f9a-a492-550062101b40/data/latest', simplifyDataFrame = TRUE)
#Grab harmony shuttle bridge query from Flipside
df_sh_harm = fromJSON('https://api.flipsidecrypto.com/api/v2/queries/0252f148-8572-45a7-8f27-a344f1a69b51/data/latest', simplifyDataFrame = TRUE)
#pull into one table
df_sh <- df_sh_eth %>%
bind_rows(df_sh_bsc_r) %>%
bind_rows(df_sh_bsc_s) %>%
bind_rows(df_sh_harm)
#tidy up
rm(df_sh_bsc_r)
rm(df_sh_bsc_s)
rm(df_sh_eth)
rm(df_sh_harm)
#fix the column names
names(df_sh)<-tolower(names(df_sh))
df_sh <- df_sh %>% rename(bridge_chain_id = shuttle_chain_id)
df_wh <- df_wh %>% rename(bridge_chain_id = wormhole_chain_id)
#Change the date to date format
df_sh$block_timestamp <- parse_datetime(df_sh$block_timestamp)
#join chain ids
chains <- tibble(bridge_chain_id = c(0,1,2,3,4,5,6),
bridge_chain = c('Unknown','Solana','Ethereum','Terra','BSC','Polygon','Harmony'))
df_sh <- df_sh %>% left_join(chains, by = "bridge_chain_id")
df_wh <- df_wh %>% left_join(chains, by = "bridge_chain_id")
#merge the tables
df_sh$bridge <- 'Shuttle'
df_wh$bridge <- 'Wormhole'
df <- df_sh %>% bind_rows(df_wh)
#create a date field
df$date <- floor_date(df$block_timestamp, unit = 'day')
# clip by date to remove the last part date
df <- df %>% filter(date < '2021-11-17')
#Grab the token labels
labels <- read_csv("https://github.com/scottincrypto/analytics/raw/master/data/wormhole_bridge_assets.csv", show_col_types = FALSE)
#join the token lables
df <- df %>%
left_join(labels, by = "denom")
#transactions per day
tx_by_day <- df %>%
group_by(bridge, date) %>%
summarise(tx_count = n(),
user_count = n_distinct(user)
)
#destinations by day
tx_by_day_chain <- df %>%
group_by(bridge, date, bridge_chain) %>%
summarise(tx_count = n(),
user_count = n_distinct(user)
)
#coins by day
tx_by_day_coin <- df %>%
group_by(bridge, date, group2, group2_order) %>%
summarise(tx_count = n(),
user_count = n_distinct(user)
)
#transactions by day as a percentage of total
tx_by_day_percent <- tx_by_day %>% select(-tx_count) %>% pivot_wider(names_from = bridge, values_from = user_count, values_fill = 0) %>%
mutate('Shuttle Bridge' = Shuttle / (Shuttle + Wormhole) * 100, 'Wormhole Bridge' = Wormhole / (Shuttle + Wormhole) * 100) %>%
select(-Shuttle, -Wormhole) %>% pivot_longer(cols=-date, names_to = "bridge", values_to = "percentage") %>%
arrange(date, desc(bridge))
###Output
_____no_output_____
###Markdown
The Wormhole Bridge. The Terra Network, up until recently, has had limited options for blockchain-to-blockchain transfer of assets. The official Terra Bridge, known as the Shuttle Bridge, has served Terra with bridges to the Ethereum, Binance Smart Chain (BSC) and Harmony blockchains. The Shuttle Bridge is a centralised conduit between chains, with liquidity held by multisig wallets. Terraform Labs put forward the plan to migrate to a decentralised alternative - the Wormhole Network. The Wormhole Network is a cross-chain network which solves the problem of inter-chain communication between blockchains which have different consensus mechanisms. It operates a set of Guardian nodes which perform attestations on the consensus operations of the attached chains. This simple model removes the need to run a consensus protocol on Wormhole - it is simply a network of oracles reporting on what happens on the attached chains. Thus Wormhole is able to manage communications between networks with very different consensus models - in particular, Solana. The Columbus-5 upgrade to Terra provided the infrastructure for Wormhole to interoperate with the network. This went live on the Terra Network in late September 2021. In October 2021, the Wormhole V2 network went live, providing a bridge between Terra, Ethereum, BSC and Solana, with Polygon added a few weeks later. We will examine the uptake of this exciting new product.[](https://medium.com/terra-money/terra-goes-live-on-wormhole-v2-12df49d446d2) Transaction Volume. The data used in this analysis is based on transactions to & from the Terra network using either the Shuttle Bridges (one each for Ethereum, BSC and Harmony) or the Wormhole Bridge (a single contract endpoint for all destinations). The chart below shows the number of transactions across the two bridge networks since the Columbus-5 upgrade on Terra on September 30, 2021. Here we see a large increase in transactions across the Shuttle bridge in the weeks following the Columbus-5 upgrade. This settled down to a steady level of around 3000 per day prior to the Wormhole launch. Wormhole transactions quickly jumped up to 200-300 per day and remained steady there. The additional transactions don't appear to have had a dramatic impact on the overall number of bridge transactions.
###Code
#hide_input
#Plot the proportion of transactions
df_rel_debt = %R tx_by_day %>% arrange(desc(bridge))
fig = px.area(df_rel_debt
, x="date"
, y="tx_count"
, color="bridge"
, template="simple_white", width=800, height=800/1.618
, title= "Bridge Transactions by Bridge")
fig.update_yaxes(title_text='Count of all Bridge Transactions')
fig.update_xaxes(title_text=None)
fig.update_layout(legend=dict(
yanchor="top",
y=0.90,
xanchor="right",
x=0.99
))
fig.update_layout(legend_title_text=None)
fig.show()
###Output
_____no_output_____
###Markdown
If we examine the above data, but plot each bridge as a percentage of the total, we see that the Wormhole bridge traffic is around 6-9% of the total bridge traffic to & from Terra. The prior observations of this being a steady state hold - there doesn't appear to be any dramatic ramp-up in traffic.
###Code
#hide_input
#Plot the percentages of transactions
df_rel_debt = %R tx_by_day_percent
fig = px.area(df_rel_debt
, x="date"
, y="percentage"
, color="bridge"
, template="simple_white", width=800, height=800/1.618
, title= "Percentage of Bridge Transactions by Bridge")
fig.update_yaxes(title_text='% of all Bridge Transactions')
fig.update_xaxes(title_text=None)
fig.update_layout(legend=dict(
yanchor="top",
y=0.90,
xanchor="right",
x=0.99
))
fig.update_layout(legend_title_text=None)
fig.show()
###Output
_____no_output_____
###Markdown
User Adoption Rate. Using the same dataset, we can examine the number of unique wallets that interact with the bridges on each day. Whilst people can have multiple wallets, this is the best proxy we have for user adoption. The chart below shows the number of wallets interacting with each bridge on each day. Wormhole appears to be attracting 100-200 users per day, and this number appears to be steadily rising. This doesn't appear to be having a discernible impact on the number of users on the Shuttle bridge. It's possible these may be new bridge users - it's difficult to tell from this dataset alone, but some further data will support this hypothesis.
###Code
#hide_input
# User Counts
df_p = %R tx_by_day %>% select(-tx_count) %>% pivot_wider(names_from = bridge, values_from = user_count, values_fill = 0) %>% arrange(date)
fig = make_subplots(rows=2, cols=1, subplot_titles=("Shuttle Bridge Wallets", "Wormhole Bridge Wallets"))
fig.append_trace(go.Scatter(x=df_p["date"], y=df_p["Shuttle"], name="Shuttle Wallets Count"), row=1, col=1)
fig.append_trace(go.Scatter(x=df_p["date"], y=df_p["Wormhole"], name="Wormhole Wallets Count"), row=2, col=1)
fig.update_layout(width=800, height=800/1.618, title_text="Bridge Wallet Count")
fig.update_layout(template="simple_white", showlegend=False)
fig.update_yaxes(title_text='Wallet Count', row=1, col=1)
fig.update_yaxes(title_text='Wallet Count', row=2, col=1)
fig.show()
###Output
_____no_output_____
###Markdown
Which Tokens are Transferred on the Bridges? The Shuttle bridge supports sending & receiving of wrapped Terra assets to the remote bridges. Wormhole extends this functionality and supports sending & receiving of many more assets from other chains. These exist as wrapped versions of the original token on the Terra chain. It should come as no surprise that the majority of bridge traffic is LUNA and UST - the two keystone assets on the Terra network. UST has the potential to be a major cross-chain decentralised US dollar stablecoin, and LUNA is the volatile asset which gives users exposure to the success of the Terra Network. The following charts show the types of tokens which are sent & received on the two bridges, broken into groups as follows:* UST* LUNA* Other Terra Assets - Terra native assets like ANC, MIR, and KRT and Mirror assets like mAAPL and mTSLA* Non-Terra Assets - assets from other chains wrapped by Wormhole - whETH, whUSDT, whDOGE etc. The first chart below shows the number of transactions on the Shuttle Bridge split by the token type from the list above. UST accounts for most of the traffic, demonstrating its utility as a cross-chain stablecoin. A smaller number of transactions are LUNA (remembering that 1 LUNA ~ 50 UST) and an even smaller amount are the other Terra assets.
###Code
#hide_input
#Tokens Sent/Received via Wormhole
df_p = %R tx_by_day_coin %>% filter(bridge == "Shuttle") %>% arrange(group2_order)
fig = px.bar(df_p
, x = "date"
, y = "tx_count"
, color = 'group2'
, labels=dict(date="Date", tx_count="Transactions", group2='Token Type')
, title= "Transactions on Shuttle Bridge by Token Type"
, template="simple_white", width=800, height=800/1.618
)
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="right",
x=0.99,
title_text=None
))
fig.update_yaxes(title_text='Transaction Count')
fig.update_xaxes(title_text=None)
fig.show()
###Output
_____no_output_____
###Markdown
The next chart shows the number of transactions by token type over the Wormhole bridge. The results are similar - most transactions are for UST. We can see the non-Terra assets with a small but consistent usage - there is some demand for new assets on Terra.
###Code
#hide_input
#Tokens Sent/Received via Wormhole
df_p = %R tx_by_day_coin %>% filter(bridge == "Wormhole") %>% arrange(group2_order)
fig = px.bar(df_p
, x = "date"
, y = "tx_count"
, color = 'group2'
, labels=dict(date="Date", tx_count="Transactions", group2='Token Type')
, title= "Transactions on Wormhole Bridge by Token Type"
, template="simple_white", width=800, height=800/1.618
)
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
title_text=None
))
fig.update_yaxes(title_text='Transaction Count')
fig.update_xaxes(title_text=None)
fig.show()
###Output
_____no_output_____
###Markdown
What are the Destinations? The two bridges have a different set of linked chains, so we expect to see a difference in the data when we look at the source & destination chains. The Shuttle bridge is only linked to Ethereum, Binance Smart Chain (BSC) and Harmony, whereas Wormhole supports Ethereum, BSC, Polygon and Solana. It is expected that Wormhole will expand to support more chains in the future. The Shuttle bridge shows most of the transactions are to or from the BSC chain. This behaviour can potentially be explained by the high fees associated with bridging to Ethereum in comparison to BSC. Whilst bridge fees themselves are modest, the gas costs for getting on or off at the Ethereum end of the bridge are very high.
###Code
#hide_input
#Destination Chain Sent via Shuttle (received not possible)
df_p = %R tx_by_day_chain %>% filter(bridge == "Shuttle") %>% filter(bridge_chain != "Unknown") %>% arrange(bridge_chain)
fig = px.bar(df_p
, x = "date"
, y = "tx_count"
, color = 'bridge_chain'
, labels=dict(date="Date", tx_count="Transactions", bridge_chain='Chain')
, title= "All Transactions on Shuttle Bridge by Source/Destination Chain"
, template="simple_white", width=800, height=800/1.618
)
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="right",
x=0.99,
title_text=None
))
fig.update_yaxes(title_text='Transaction Count')
fig.update_xaxes(title_text=None)
fig.show()
###Output
_____no_output_____
###Markdown
The chart below shows all of the transactions sent via the Wormhole bridge with their destination chains. Note that this data does not contain transactions received via Wormhole - the source chain is not visible in the interchain message which comes from the bridge to the Terra network. The data below is surprising - it doesn't mirror the Shuttle bridge data. We see that the overwhelming majority of transactions sent via Wormhole are to the Solana network. Users of the Shuttle bridge haven't migrated their bridging activities across to Wormhole - they have a new use case instead. It appears that the Wormhole bridge has immediately serviced an unmet need - transferring assets between Terra and Solana. A further insight can be drawn from these data - why haven't users migrated across to the Wormhole bridge from the Shuttle bridge? The fees on the Shuttle bridge are 0.1% of the transfer value (minimum fee 1 UST) plus gas fees at either end, whereas the Wormhole fees are currently sub-cent plus gas. Wormhole is the cheaper option. It's possible that the Shuttle bridge users simply don't yet know about the newer, cheaper bridge. It may take further marketing or changing of the Shuttle bridge UI to move users to Wormhole.
###Code
#hide_input
#Destination Chain Sent via Wormhole (received not possible)
df_p = %R tx_by_day_chain %>% filter(bridge == "Wormhole") %>% filter(bridge_chain != "Unknown") %>% arrange(bridge_chain)
fig = px.bar(df_p
, x = "date"
, y = "tx_count"
, color = 'bridge_chain'
, labels=dict(date="Date", tx_count="Transactions", bridge_chain='Destination Chain')
, title= "Send Transactions on Wormhole Bridge by Destination Chain"
, template="simple_white", width=800, height=800/1.618
)
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.01,
title_text=None
))
fig.update_yaxes(title_text='Transaction Count')
fig.update_xaxes(title_text=None)
fig.show()
###Output
_____no_output_____ |
experiments/pedestrians/Result Analysis.ipynb | ###Markdown
Displacement Error Analysis
###Code
from collections import OrderedDict
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
prior_work_fse_results = {
'ETH - Univ': OrderedDict([('Linear', 2.94), ('Vanilla LSTM', 2.41), ('Social LSTM', 2.35), ('Social Attention', 3.74)]),
'ETH - Hotel': OrderedDict([('Linear', 0.72), ('Vanilla LSTM', 1.91), ('Social LSTM', 1.76), ('Social Attention', 2.64)]),
'UCY - Univ': OrderedDict([('Linear', 1.59), ('Vanilla LSTM', 1.31), ('Social LSTM', 1.40), ('Social Attention', 0.52)]),
'UCY - Zara 1': OrderedDict([('Linear', 1.21), ('Vanilla LSTM', 0.88), ('Social LSTM', 1.00), ('Social Attention', 2.13)]),
'UCY - Zara 2': OrderedDict([('Linear', 1.48), ('Vanilla LSTM', 1.11), ('Social LSTM', 1.17), ('Social Attention', 3.92)]),
'Average': OrderedDict([('Linear', 1.59), ('Vanilla LSTM', 1.52), ('Social LSTM', 1.54), ('Social Attention', 2.59)])
}
# These are for a prediction horizon of 12 timesteps.
prior_work_ade_results = {
'ETH - Univ': OrderedDict([('Linear', 1.33), ('Vanilla LSTM', 1.09), ('Social LSTM', 1.09), ('Social Attention', 0.39)]),
'ETH - Hotel': OrderedDict([('Linear', 0.39), ('Vanilla LSTM', 0.86), ('Social LSTM', 0.79), ('Social Attention', 0.29)]),
'UCY - Univ': OrderedDict([('Linear', 0.82), ('Vanilla LSTM', 0.61), ('Social LSTM', 0.67), ('Social Attention', 0.20)]),
'UCY - Zara 1': OrderedDict([('Linear', 0.62), ('Vanilla LSTM', 0.41), ('Social LSTM', 0.47), ('Social Attention', 0.30)]),
'UCY - Zara 2': OrderedDict([('Linear', 0.77), ('Vanilla LSTM', 0.52), ('Social LSTM', 0.56), ('Social Attention', 0.33)]),
'Average': OrderedDict([('Linear', 0.79), ('Vanilla LSTM', 0.70), ('Social LSTM', 0.72), ('Social Attention', 0.30)])
}
linestyles = ['--', '-.', '-', ':']
mean_markers = 'X'
marker_size = 7
line_colors = ['#1f78b4','#33a02c','#fb9a99','#e31a1c']
area_colors = ['#80CBE5','#ABCB51', '#F05F78']
area_rgbs = list()
for c in area_colors:
area_rgbs.append([int(c[i:i+2], 16) for i in (1, 3, 5)])
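# NOTE (assumed setup): dataset_names, alg_name and pretty_dataset_name are
# defined in a setup cell that is not shown here. The definitions below are a
# hypothetical reconstruction, inferred from how they are used in this notebook.
dataset_names = ['eth', 'hotel', 'univ', 'zara1', 'zara2', 'Average']  # assumed keys
alg_name = 'Trajectron++'  # assumed display name of our method
def pretty_dataset_name(dataset_name):
    # assumed mapping from dataset keys to the display names used above
    name_map = {'eth': 'ETH - Univ', 'hotel': 'ETH - Hotel', 'univ': 'UCY - Univ',
                'zara1': 'UCY - Zara 1', 'zara2': 'UCY - Zara 2'}
    return name_map.get(dataset_name, dataset_name)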
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}*attention_radius_3*fde_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
perf_df = perf_df.rename(columns={"metric": "error_type", "value": "error_value"})
# Load Trajectron and GAN
errors_df = pd.concat([pd.read_csv(f) for f in glob.glob('csv/old/curr_*_errors.csv')], ignore_index=True)
del errors_df['data_precondition']
errors_df = errors_df[~(errors_df['method'] == 'our_full')]
errors_df = errors_df[~(errors_df['error_type'] == 'mse')]
errors_df.loc[errors_df['error_type'] =='fse', 'error_type'] = 'fde'
#errors_df.loc[errors_df['error_type'] =='mse', 'error_type'] = 'ade'
errors_df.loc[errors_df['method'] == 'our_most_likely', 'method'] = 'Trajectron'
perf_df = perf_df.append(errors_df)
errors_df
with sns.color_palette("muted"):
fig_fse, ax_fses = plt.subplots(nrows=1, ncols=6, figsize=(8, 4), dpi=300, sharey=True)
for idx, ax_fse in enumerate(ax_fses):
dataset_name = dataset_names[idx]
if dataset_name != 'Average':
specific_df = perf_df[(perf_df['dataset'] == dataset_name) & (perf_df['error_type'] == 'fde')]
specific_df['dataset'] = pretty_dataset_name(dataset_name)
else:
specific_df = perf_df[(perf_df['error_type'] == 'fde')].copy()
specific_df['dataset'] = 'Average'
sns.boxplot(x='dataset', y='error_value', hue='method',
data=specific_df, ax=ax_fse, showfliers=False,
palette=area_colors, hue_order=['sgan', 'Trajectron', alg_name], width=2.)
ax_fse.get_legend().remove()
ax_fse.set_xlabel('')
ax_fse.set_ylabel('' if idx > 0 else 'Final Displacement Error (m)')
ax_fse.scatter([-0.665, 0, 0.665],
[np.mean(specific_df[specific_df['method'] == 'sgan']['error_value']),
np.mean(specific_df[specific_df['method'] == 'Trajectron']['error_value']),
np.mean(specific_df[specific_df['method'] == alg_name]['error_value'])],
s=marker_size*marker_size, c=np.asarray(area_rgbs)/255.0, marker=mean_markers,
edgecolors='#545454', zorder=10)
for baseline_idx, (baseline, fse_val) in enumerate(prior_work_fse_results[pretty_dataset_name(dataset_name)].items()):
ax_fse.axhline(y=fse_val, label=baseline, color=line_colors[baseline_idx], linestyle=linestyles[baseline_idx])
if idx == 0:
handles, labels = ax_fse.get_legend_handles_labels()
handles = [handles[0], handles[4], handles[1], handles[5], handles[2], handles[6], handles[3]]
labels = [labels[0], 'Social GAN', labels[1], 'Trajectron', labels[2], alg_name, labels[3]]
ax_fse.legend(handles, labels,
loc='lower center', bbox_to_anchor=(0.5, 0.9),
ncol=4, borderaxespad=0, frameon=False,
bbox_transform=fig_fse.transFigure)
# fig_fse.text(0.51, 0.03, 'Dataset', ha='center')
plt.savefig('plots/fde_boxplots.pdf', dpi=300, bbox_inches='tight')
del perf_df
del errors_df
###Output
_____no_output_____
###Markdown
Average Displacement Error
###Code
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}*attention_radius_3*ade_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
perf_df = perf_df.rename(columns={"metric": "error_type", "value": "error_value"})
#perf_df.head()
# Load Trajectron and GAN
errors_df = pd.concat([pd.read_csv(f) for f in glob.glob('old/curr_*_errors.csv')], ignore_index=True)
del errors_df['data_precondition']
errors_df = errors_df[~(errors_df['method'] == 'our_full')]
errors_df = errors_df[~(errors_df['error_type'] == 'fse')]
#errors_df.loc[errors_df['error_type'] =='fse', 'error_type'] = 'fde'
errors_df.loc[errors_df['error_type'] =='mse', 'error_type'] = 'ade'
errors_df.loc[errors_df['method'] == 'our_most_likely', 'method'] = 'Trajectron'
perf_df = perf_df.append(errors_df)
del errors_df
with sns.color_palette("muted"):
fig_fse, ax_fses = plt.subplots(nrows=1, ncols=6, figsize=(8, 4), dpi=300, sharey=True)
for idx, ax_fse in enumerate(ax_fses):
dataset_name = dataset_names[idx]
if dataset_name != 'Average':
specific_df = perf_df[(perf_df['dataset'] == dataset_name) & (perf_df['error_type'] == 'ade')]
specific_df['dataset'] = pretty_dataset_name(dataset_name)
else:
specific_df = perf_df[(perf_df['error_type'] == 'ade')].copy()
specific_df['dataset'] = 'Average'
sns.boxplot(x='dataset', y='error_value', hue='method',
data=specific_df, ax=ax_fse, showfliers=False,
palette=area_colors, hue_order=['sgan', 'Trajectron', alg_name], width=2.)
ax_fse.get_legend().remove()
ax_fse.set_xlabel('')
ax_fse.set_ylabel('' if idx > 0 else 'Average Displacement Error (m)')
ax_fse.scatter([-0.665, 0, 0.665],
[np.mean(specific_df[specific_df['method'] == 'sgan']['error_value']),
np.mean(specific_df[specific_df['method'] == 'Trajectron']['error_value']),
np.mean(specific_df[specific_df['method'] == alg_name]['error_value'])],
s=marker_size*marker_size, c=np.asarray(area_rgbs)/255.0, marker=mean_markers,
edgecolors='#545454', zorder=10)
for baseline_idx, (baseline, fse_val) in enumerate(prior_work_ade_results[pretty_dataset_name(dataset_name)].items()):
ax_fse.axhline(y=fse_val, label=baseline, color=line_colors[baseline_idx], linestyle=linestyles[baseline_idx])
if idx == 0:
handles, labels = ax_fse.get_legend_handles_labels()
handles = [handles[0], handles[4], handles[1], handles[5], handles[2], handles[6], handles[3]]
labels = [labels[0], 'Social GAN', labels[1], 'Trajectron', labels[2], alg_name, labels[3]]
ax_fse.legend(handles, labels,
loc='lower center', bbox_to_anchor=(0.5, 0.9),
ncol=4, borderaxespad=0, frameon=False,
bbox_transform=fig_fse.transFigure)
# fig_fse.text(0.51, 0.03, 'Dataset', ha='center')
plt.savefig('plots/ade_boxplots.pdf', dpi=300, bbox_inches='tight')
del perf_df
###Output
_____no_output_____
###Markdown
KDE Negative Log Likelihood Attention Radius 3m
###Code
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*kde_full.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True)
del perf_df['Unnamed: 0']
#perf_df.head()
# # Load Trajectron and SGAN
# lls_df = pd.concat([pd.read_csv(f) for f in glob.glob('csv/old/curr_*_lls.csv')], ignore_index=True)
# lls_df.loc[lls_df['method'] == 'our_full', 'method'] = 'Trajectron'
# lls_df['error_type'] = 'KDE'
# #lls_df.head()
for dataset in dataset_names:
if dataset != 'Average':
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan')]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron')]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely FDE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*fde_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely Evaluation ADE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*ade_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation FDE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*fde_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation ADE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*ade_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
KDE Negative Log Likelihood Attention Radius 3m Velocity
###Code
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*kde_full.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True)
del perf_df['Unnamed: 0']
#perf_df.head()
# # Load Trajectron and SGAN
# lls_df = pd.concat([pd.read_csv(f) for f in glob.glob('csv/old/curr_*_lls.csv')], ignore_index=True)
# lls_df.loc[lls_df['method'] == 'our_full', 'method'] = 'Trajectron'
# lls_df['error_type'] = 'KDE'
# #lls_df.head()
for dataset in dataset_names:
if dataset != 'Average':
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan')]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron')]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely FDE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*fde_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely Evaluation ADE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*ade_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation FDE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*fde_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation ADE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*ade_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Displacement Error Analysis
###Code
prior_work_fse_results = {
'ETH - Univ': OrderedDict([('Linear', 2.94), ('Vanilla LSTM', 2.41), ('Social LSTM', 2.35), ('Social Attention', 3.74)]),
'ETH - Hotel': OrderedDict([('Linear', 0.72), ('Vanilla LSTM', 1.91), ('Social LSTM', 1.76), ('Social Attention', 2.64)]),
'UCY - Univ': OrderedDict([('Linear', 1.59), ('Vanilla LSTM', 1.31), ('Social LSTM', 1.40), ('Social Attention', 0.52)]),
'UCY - Zara 1': OrderedDict([('Linear', 1.21), ('Vanilla LSTM', 0.88), ('Social LSTM', 1.00), ('Social Attention', 2.13)]),
'UCY - Zara 2': OrderedDict([('Linear', 1.48), ('Vanilla LSTM', 1.11), ('Social LSTM', 1.17), ('Social Attention', 3.92)]),
'Average': OrderedDict([('Linear', 1.59), ('Vanilla LSTM', 1.52), ('Social LSTM', 1.54), ('Social Attention', 2.59)])
}
# These are for a prediction horizon of 12 timesteps.
prior_work_ade_results = {
'ETH - Univ': OrderedDict([('Linear', 1.33), ('Vanilla LSTM', 1.09), ('Social LSTM', 1.09), ('Social Attention', 0.39)]),
'ETH - Hotel': OrderedDict([('Linear', 0.39), ('Vanilla LSTM', 0.86), ('Social LSTM', 0.79), ('Social Attention', 0.29)]),
'UCY - Univ': OrderedDict([('Linear', 0.82), ('Vanilla LSTM', 0.61), ('Social LSTM', 0.67), ('Social Attention', 0.20)]),
'UCY - Zara 1': OrderedDict([('Linear', 0.62), ('Vanilla LSTM', 0.41), ('Social LSTM', 0.47), ('Social Attention', 0.30)]),
'UCY - Zara 2': OrderedDict([('Linear', 0.77), ('Vanilla LSTM', 0.52), ('Social LSTM', 0.56), ('Social Attention', 0.33)]),
'Average': OrderedDict([('Linear', 0.79), ('Vanilla LSTM', 0.70), ('Social LSTM', 0.72), ('Social Attention', 0.30)])
}
linestyles = ['--', '-.', '-', ':']
mean_markers = 'X'
marker_size = 7
line_colors = ['#1f78b4','#33a02c','#fb9a99','#e31a1c']
area_colors = ['#80CBE5','#ABCB51', '#F05F78']
area_rgbs = list()
for c in area_colors:
area_rgbs.append([int(c[i:i+2], 16) for i in (1, 3, 5)])
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}*attention_radius_3*fde_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
perf_df = perf_df.rename(columns={"metric": "error_type", "value": "error_value"})
# Load Trajectron and GAN
errors_df = pd.concat([pd.read_csv(f) for f in glob.glob('csv/old/curr_*_errors.csv')], ignore_index=True)
del errors_df['data_precondition']
errors_df = errors_df[~(errors_df['method'] == 'our_full')]
errors_df = errors_df[~(errors_df['error_type'] == 'mse')]
errors_df.loc[errors_df['error_type'] =='fse', 'error_type'] = 'fde'
#errors_df.loc[errors_df['error_type'] =='mse', 'error_type'] = 'ade'
errors_df.loc[errors_df['method'] == 'our_most_likely', 'method'] = 'Trajectron'
perf_df = perf_df.append(errors_df)
errors_df
with sns.color_palette("muted"):
fig_fse, ax_fses = plt.subplots(nrows=1, ncols=6, figsize=(8, 4), dpi=300, sharey=True)
for idx, ax_fse in enumerate(ax_fses):
dataset_name = dataset_names[idx]
if dataset_name != 'Average':
specific_df = perf_df[(perf_df['dataset'] == dataset_name) & (perf_df['error_type'] == 'fde')]
specific_df['dataset'] = pretty_dataset_name(dataset_name)
else:
specific_df = perf_df[(perf_df['error_type'] == 'fde')].copy()
specific_df['dataset'] = 'Average'
sns.boxplot(x='dataset', y='error_value', hue='method',
data=specific_df, ax=ax_fse, showfliers=False,
palette=area_colors, hue_order=['sgan', 'Trajectron', alg_name], width=2.)
ax_fse.get_legend().remove()
ax_fse.set_xlabel('')
ax_fse.set_ylabel('' if idx > 0 else 'Final Displacement Error (m)')
ax_fse.scatter([-0.665, 0, 0.665],
[np.mean(specific_df[specific_df['method'] == 'sgan']['error_value']),
np.mean(specific_df[specific_df['method'] == 'Trajectron']['error_value']),
np.mean(specific_df[specific_df['method'] == alg_name]['error_value'])],
s=marker_size*marker_size, c=np.asarray(area_rgbs)/255.0, marker=mean_markers,
edgecolors='#545454', zorder=10)
for baseline_idx, (baseline, fse_val) in enumerate(prior_work_fse_results[pretty_dataset_name(dataset_name)].items()):
ax_fse.axhline(y=fse_val, label=baseline, color=line_colors[baseline_idx], linestyle=linestyles[baseline_idx])
if idx == 0:
handles, labels = ax_fse.get_legend_handles_labels()
handles = [handles[0], handles[4], handles[1], handles[5], handles[2], handles[6], handles[3]]
labels = [labels[0], 'Social GAN', labels[1], 'Trajectron', labels[2], alg_name, labels[3]]
ax_fse.legend(handles, labels,
loc='lower center', bbox_to_anchor=(0.5, 0.9),
ncol=4, borderaxespad=0, frameon=False,
bbox_transform=fig_fse.transFigure)
# fig_fse.text(0.51, 0.03, 'Dataset', ha='center')
plt.savefig('plots/fde_boxplots.pdf', dpi=300, bbox_inches='tight')
del perf_df
del errors_df
###Output
_____no_output_____
###Markdown
Average Displacement Error
###Code
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}*attention_radius_3*ade_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
perf_df = perf_df.rename(columns={"metric": "error_type", "value": "error_value"})
#perf_df.head()
# Load Trajectron and GAN
errors_df = pd.concat([pd.read_csv(f) for f in glob.glob('old/curr_*_errors.csv')], ignore_index=True)
del errors_df['data_precondition']
errors_df = errors_df[~(errors_df['method'] == 'our_full')]
errors_df = errors_df[~(errors_df['error_type'] == 'fse')]
#errors_df.loc[errors_df['error_type'] =='fse', 'error_type'] = 'fde'
errors_df.loc[errors_df['error_type'] =='mse', 'error_type'] = 'ade'
errors_df.loc[errors_df['method'] == 'our_most_likely', 'method'] = 'Trajectron'
perf_df = perf_df.append(errors_df)
del errors_df
with sns.color_palette("muted"):
fig_fse, ax_fses = plt.subplots(nrows=1, ncols=6, figsize=(8, 4), dpi=300, sharey=True)
for idx, ax_fse in enumerate(ax_fses):
dataset_name = dataset_names[idx]
if dataset_name != 'Average':
specific_df = perf_df[(perf_df['dataset'] == dataset_name) & (perf_df['error_type'] == 'ade')].copy()
specific_df['dataset'] = pretty_dataset_name(dataset_name)
else:
specific_df = perf_df[(perf_df['error_type'] == 'ade')].copy()
specific_df['dataset'] = 'Average'
sns.boxplot(x='dataset', y='error_value', hue='method',
data=specific_df, ax=ax_fse, showfliers=False,
palette=area_colors, hue_order=['sgan', 'Trajectron', alg_name], width=2.)
ax_fse.get_legend().remove()
ax_fse.set_xlabel('')
ax_fse.set_ylabel('' if idx > 0 else 'Average Displacement Error (m)')
ax_fse.scatter([-0.665, 0, 0.665],
[np.mean(specific_df[specific_df['method'] == 'sgan']['error_value']),
np.mean(specific_df[specific_df['method'] == 'Trajectron']['error_value']),
np.mean(specific_df[specific_df['method'] == alg_name]['error_value'])],
s=marker_size*marker_size, c=np.asarray(area_rgbs)/255.0, marker=mean_markers,
edgecolors='#545454', zorder=10)
for baseline_idx, (baseline, fse_val) in enumerate(prior_work_ade_results[pretty_dataset_name(dataset_name)].items()):
ax_fse.axhline(y=fse_val, label=baseline, color=line_colors[baseline_idx], linestyle=linestyles[baseline_idx])
if idx == 0:
handles, labels = ax_fse.get_legend_handles_labels()
handles = [handles[0], handles[4], handles[1], handles[5], handles[2], handles[6], handles[3]]
labels = [labels[0], 'Social GAN', labels[1], 'Trajectron', labels[2], alg_name, labels[3]]
ax_fse.legend(handles, labels,
loc='lower center', bbox_to_anchor=(0.5, 0.9),
ncol=4, borderaxespad=0, frameon=False,
bbox_transform=fig_fse.transFigure)
# fig_fse.text(0.51, 0.03, 'Dataset', ha='center')
plt.savefig('plots/ade_boxplots.pdf', dpi=300, bbox_inches='tight')
del perf_df
###Output
_____no_output_____
###Markdown
KDE Negative Log Likelihood Attention Radius 3m
###Code
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*kde_full.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True)
del perf_df['Unnamed: 0']
#perf_df.head()
# # Load Trajectron and SGAN
# lls_df = pd.concat([pd.read_csv(f) for f in glob.glob('csv/old/curr_*_lls.csv')], ignore_index=True)
# lls_df.loc[lls_df['method'] == 'our_full', 'method'] = 'Trajectron'
# lls_df['error_type'] = 'KDE'
# #lls_df.head()
for dataset in dataset_names:
if dataset != 'Average':
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan')]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron')]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely FDE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*fde_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely ADE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*ade_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation FDE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*fde_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation ADE Attention Radius 3m
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_12*ade_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
KDE Negative Log Likelihood Attention Radius 3m Velocity
###Code
# Load Ours
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*kde_full.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True)
del perf_df['Unnamed: 0']
#perf_df.head()
# # Load Trajectron and SGAN
# lls_df = pd.concat([pd.read_csv(f) for f in glob.glob('csv/old/curr_*_lls.csv')], ignore_index=True)
# lls_df.loc[lls_df['method'] == 'our_full', 'method'] = 'Trajectron'
# lls_df['error_type'] = 'KDE'
# #lls_df.head()
for dataset in dataset_names:
if dataset != 'Average':
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron') & (lls_df['dataset'] == dataset)]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print('KDE NLL for ' + pretty_dataset_name(dataset))
#print(f"SGAN: {-lls_df[(lls_df['method'] == 'sgan')]['log-likelihood'].mean()}")
#print(f"Trajectron: {-lls_df[(lls_df['method'] == 'Trajectron')]['log-likelihood'].mean()}")
print(f"{alg_name}: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely FDE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*fde_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Most Likely Evaluation ADE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*ade_most_likely.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = 'Trajectron++'
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Most Likely for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++') & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"{alg_name}: {perf_df[(perf_df['method'] == 'Trajectron++')]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation FDE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*fde_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('FDE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____
###Markdown
Best of 20 Evaluation ADE Attention Radius 3m Velocity
###Code
perf_df = pd.DataFrame()
for dataset in dataset_names:
for f in glob.glob(f"results/{dataset}_vel_12*ade_best_of.csv"):
print(f)
dataset_df = pd.read_csv(f)
dataset_df['dataset'] = dataset
dataset_df['method'] = alg_name
perf_df = perf_df.append(dataset_df, ignore_index=True, sort=False)
del perf_df['Unnamed: 0']
for dataset in dataset_names:
print('ADE Best of 20 for ' + pretty_dataset_name(dataset))
if dataset != 'Average':
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name) & (perf_df['dataset'] == dataset)]['value'].mean()}")
else:
print(f"Trajectron++: {perf_df[(perf_df['method'] == alg_name)]['value'].mean()}")
del perf_df
###Output
_____no_output_____ |
facility_location_problem.ipynb | ###Markdown
Uncapacitated Facility Location Problem Background* You are deciding on five new potential sites to build an FC.* You have a set of customer demands (individual customer orders) that you plan to fulfill from these FCs* Every customer order incurs a shipping cost. * Shipping cost for each customer order = (distance from FC to customer destination)/$100* We assume all customer orders are the same (no product variation)* Each facility that is opened will incur a one-time fixed cost for construction* We assume facilities are uncapacitated Problem statement* Which five sites should you open to minimize overall costs while still being able to satisfy customer demand? Mathematical Formulation Decision Variables Decision Variables: $x_{ij}$ and $y_{i}$Let $x_{ij}$ = the number of orders to ship from facility ${i}$ to customer destination ${j}$Let $y_{i}$ = 1 if facility ${i}$ is selected, 0 otherwise $x_{ij}\in {\rm I\!R}$$y_{i}\in \{0,1\}$ Parameters Let $I$ be the set of possible FC locations to choose from Let $J$ be the set of all customers Let $N$ = the total number of facilities to openLet $f_i$ be the setup cost for constructing facility $i$Let $c_{ij}$ be the cost of shipping an order from facility $i$ to customer $j$Let $d_{j}$ be the demand from customer $j$ Objective Function Objective Function: Minimize Z = startup_cost + transportation_cost$$ Z = \sum_{i \: \in \:I} f_iy_i + \sum_{i \: \in \:I}\sum_{j \: \in \:J}c_{ij}x_{ij}$$ Constraints Constraint 1: all demand must be met$$ \sum_{i \: \in \:I} x_{ij} = d_j \qquad \forall \enspace j \: \in \: J$$ Constraint 2: shipments can only be made from sites that have been selected to open$$ \sum_{j \: \in \:J} x_{ij} \leq M_iy_i\qquad \forall \enspace i \: \in \: I; $$ $$ x_{ij} \leq d_jy_i\qquad \forall \enspace i \: \in \: I;\enspace j \: \in \: J;$$ Constraint 3: exactly $N$ new sites must be opened (here $N = 5$)$$ \sum_{i \: \in \:I} y_i = N $$ Constraint 4:$$ x_{ij} \geq 0 \qquad \forall \enspace i \: \in \: I; \enspace j \: \in \: J$$ $$ y_{i} \in \{0,1\}$$ CPLEX/Python LP Model Import packages
###Code
import pandas as pd
import numpy as np
from docplex.mp.model import Model
import math
###Output
_____no_output_____
###Markdown
Intializing the data
###Code
# reading in external data
facility_df = pd.read_csv('facility_data.csv')
customer_df = pd.read_csv('customer_data.csv')
# caluclating distance
facility_df['key'] = 1
customer_df['key'] = 1
result = pd.merge(facility_df, customer_df, on='key').drop(columns='key')
result['temp_x'] = (result['facility_x'] - result['customer_x'])**2
result['temp_y'] = (result['facility_y'] - result['customer_y'])**2
result['temp_x_y'] = (result['temp_x'] + result['temp_y'])
result['distance'] = round(np.sqrt(result['temp_x_y']),2)
distance_df = result.copy()[['facility','customer','distance']]
distance_df.head()
# for use in model
distance = dict([((t.facility, t.customer),t.distance ) for t in distance_df.itertuples()])
setup_cost = dict([((t.facility),t.setup_cost ) for t in facility_df.itertuples()])
demand = dict([((t.customer),t.demand ) for t in customer_df.itertuples()])
fc = set(facility_df['facility'])
customer = set(customer_df['customer'])
edges = [(i, j) for i in fc for j in customer]
N = 5 # number of sits to open
M = customer_df['demand'].sum() # big-M: must be at least the total demand a single open site could serve
###Output
_____no_output_____
###Markdown
Create the model
###Code
m = Model('facility_location')
###Output
_____no_output_____
###Markdown
Decision Variables
###Code
x = m.continuous_var_dict(edges, name ='assignment')
y = m.binary_var_dict(fc, name = 'fc')
###Output
_____no_output_____
###Markdown
Objective Function
###Code
trans_cost = m.sum(distance[e]*x[e] for e in edges)
startup_cost = m.sum(setup_cost[i]*y[i] for i in fc)
m.minimize(startup_cost + trans_cost)
###Output
_____no_output_____
###Markdown
Constraints
###Code
# Constraint 1: all demand must be met
for j in customer:
m.add_constraint(m.sum(x[(i,j)] for i in fc) == demand[j], ctname='demand_%d'%j)
# Constraint 2: shipments can only be made if the site has been opened
for i in fc:
m.add_constraint(m.sum(x[(i,j)] for j in customer) <= M*y[i])
for (i, j) in edges:
m.add_constraint(x[(i, j)] <= demand[j]*y[i])
# Constraint 3: exactly five sites must be opened
m.add_constraint(m.sum(y[(i)] for i in fc) == N, ctname='const3')
print(m.export_to_string())
m.parameters.timelimit=120
m.parameters.mip.strategy.branch=1
m.parameters.mip.tolerances.mipgap=0.15
solution = m.solve(log_output=True)
m.get_solve_status()
solution.display()
# Export results to csv
import os
base_dir = os.getcwd()
def export_soln_to_csv(df, model_name = 'untitled'):
""" model refers to model object from docplex.mp.model"""
try:
os.mkdir(os.path.join(base_dir, 'output'))
except:
pass
filename = 'output/' + 'soln_' + model_name + '.csv'
solution_output = os.path.join(os.getcwd(), filename)
df.to_csv(solution_output, index=False)
###Output
_____no_output_____ |
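###Markdown
 A short usage sketch (added for illustration, not part of the original notebook): pulling the opened sites and the non-zero shipment plan out of the docplex solution, then exporting with the helper defined above. `x`, `y`, `fc`, `edges` and `export_soln_to_csv` come from earlier cells; `solution_value` is the standard docplex variable accessor, while `open_sites`, `soln_df` and the `'ufl'` file tag are illustrative names.
###Code
# which candidate sites were selected (binary vars sit at ~1 after solve)
open_sites = [i for i in fc if y[i].solution_value > 0.5]
print("Opened facilities:", open_sites)
# non-zero shipments as a tidy dataframe, exported via the helper above
soln_df = pd.DataFrame(
[(i, j, x[(i, j)].solution_value) for (i, j) in edges if x[(i, j)].solution_value > 1e-6],
columns=['facility', 'customer', 'units_shipped'])
export_soln_to_csv(soln_df, model_name='ufl')
###Output
_____no_output_____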
notebooks/trunk/Sin.ipynb | ###Markdown
This is a demo of the Universal Approximation Theorem for feed-forward neural networks We developed software called SYSNet (https://github.com/mehdirezaie/SYSNet) to model and mitigate the imaging systematics (due to seeing, airmass, and galactic extinction) in imaging surveys. The main methodology is described in Rezaie et al. 2019. This notebook demonstrates the main concept. modulesThe main pipeline is implemented in regression.py (https://github.com/mehdirezaie/LSSutils/blob/master/LSSutils/nn/regression.py), which carries the main idea of SYSNet but is modified to be compatible with TensorFlow 2.x. The `regression` module implements the neural networks, while `selection` implements the backward feature elimination/selection.
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import time
import sys
import os
home = os.getenv("HOME")
sys.path.append(f'{home}/github/LSSutils')
import regression # for regression
import selection # for feature selection
from LSSutils import setup_logging
setup_logging('info')
###Output
_____no_output_____
###Markdown
Demo 1: Model Sin(x) We want to show that a neural network can model the function sin(x). We create synthetic data based on sin(x) and train the neural network that is defined in the module `regression.py`
###Code
#import sys
#sys.path.append('/Users/mehdi/github/LSSutils')
# split data into train, test and validation sets
from LSSutils.utils import split2Kfolds
def TABLE(n = 512):
# create mock data
np.random.seed(1234567)
x = np.linspace(0., 2.*np.pi, n)
z = np.random.uniform(0, 2*np.pi, size=n)
np.random.shuffle(x) # inplace
y = np.sin(x) #+ 0.2*z
#x = x[:, np.newaxis]
x = np.column_stack([x, z])
n,m = x.shape
d = np.empty(n, dtype=[('label', 'f8'),
('features', ('f8', m)),
('fracgood', 'f8'),
('hpind', 'i8')])
d['label'] = y
if m==1:
d['features']=x.squeeze()
else:
d['features']=x
d['hpind']=1.
d['fracgood']=1.0
return d
# make table [label, features, fracgood, hpind]
Table = TABLE() # make table
Data5f = split2Kfolds(Table, k=5) # split
# take one fold for example
fold = 'fold0'
train = regression.Data(Data5f['train'][fold])
test = regression.Data(Data5f['test'][fold])
valid = regression.Data(Data5f['validation'][fold])
t_i = time.time()
Net = regression.NetRegression(train, valid, test)
Net.fit(hyperparams=True) # perform hyper-parameter training
Net._descale() # descale
t_f = time.time()
plt.figure()
plt.scatter(test.x[:,0], test.y)
plt.scatter(test.x[:,0], Net.ypreds[0]);plt.show()
plt.scatter(test.y, Net.ypreds[0]);plt.show()
print(f'took {t_f-t_i} secs')
Net.make_plots()
###Output
_____no_output_____ |
Vino Verde Red wine Quality using ANN.ipynb | ###Markdown
Case Study on a Real-Time Dataset using Artificial Neural Networks 2 We are going to do a case study on a real-time dataset using an Artificial Neural Network (ANN). The dataset is related to the red variants of the Portuguese "Vinho Verde" wine; the goal is to predict whether a red wine is of good quality or not according to its chemical properties.
###Code
#Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
#Import the dataset
data=pd.read_csv('C:/Users/aksha/Desktop/ai/datasets/winequal/redwinequal.csv')
data.head()
data.shape
###Output
_____no_output_____
###Markdown
This dataset contains 1599 rows and 12 columns. The columns are the different chemical properties of a wine that help determine its quality.1) fixed acidity: most acids involved with wine are fixed or nonvolatile (do not evaporate readily).2) volatile acidity: the amount of acetic acid in wine, which at too high levels can lead to an unpleasant, vinegar taste.3) citric acid: found in small quantities, citric acid can add 'freshness' and flavor to wines.4) residual sugar: the amount of sugar remaining after fermentation stops; it's rare to find wines with less than 1 gram/liter, and wines with greater than 45 grams/liter are considered sweet.5) chlorides: the amount of salt in the wine.6) free sulfur dioxide: the free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine.7) total sulfur dioxide: amount of free and bound forms of SO2; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine.8) density: the density of wine is close to that of water, depending on the percent alcohol and sugar content.9) pH: describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale.10) sulphates: a wine additive which can contribute to sulfur dioxide gas (SO2) levels, which acts as an antimicrobial and antioxidant.11) alcohol: the percent alcohol content of the wine.12) quality: output variable (based on sensory data, score between 0 and 10). 7 or higher gets classified as 'good' and the remainder as 'not good'
###Code
sb.heatmap(data.isnull())
###Output
_____no_output_____
###Markdown
The heatmap above shows that there are no null values.
###Code
sb.countplot(data['quality'])
###Output
_____no_output_____
###Markdown
The count plot above shows that quality score 5 occurs most often, followed by 6.
###Code
for index, row in data.iterrows():
A=row['quality']
if(A>=7):
data.loc[index,'Good'] = 1
else:
data.loc[index,'Good'] = 0
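# Aside (not in the original notebook): the iterrows() loop above has a one-line
# vectorized equivalent that is much faster on large frames:
# data['Good'] = (data['quality'] >= 7).astype(float)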
data
data.drop('quality',axis=1,inplace=True)
data
x=data.iloc[:,0:11].values
y=data.iloc[:,11].values
x
y
#Splitting the dataset into train and test set
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)
#Feature scaling
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
x_train=sc.fit_transform(x_train)
x_test=sc.transform(x_test)
x_test
#import the keras libraries
import keras
from keras.models import Sequential
from keras.layers import Dense
#Initialising the ANN
classifier=Sequential()
#Adding the input and first hidden layer
classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu',input_dim=11))
#Adding the second hidden layer
classifier.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))
#Adding the output layer
classifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
#Compiling the ANN
classifier.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
#Fitting the ANN to the training set
classifier.fit(x_train,y_train,batch_size=10,epochs=100)
y_pred=classifier.predict(x_test)
y_pred=(y_pred>0.5)
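# the sigmoid output is a probability; thresholding at 0.5 converts it to a binary class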
#Accuracy score
from sklearn.metrics import accuracy_score
print('accuracy score:',accuracy_score(y_test,y_pred))
y_pred
#confusion matrix and classification report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
print('confusion_matrix:',confusion_matrix(y_test,y_pred),sep='\n')
print('accuracy score:',accuracy_score(y_test,y_pred))
print(classification_report(y_test,y_pred))
###Output
precision recall f1-score support
0.0 0.95 0.92 0.93 290
1.0 0.39 0.50 0.44 30
micro avg 0.88 0.88 0.88 320
macro avg 0.67 0.71 0.69 320
weighted avg 0.90 0.88 0.89 320
###Markdown
ROC
###Code
#Importing ROC-AUC packages
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
def plot_roc_curve(fpr,tpr):
plt.plot(fpr,tpr,color='red',label='ROC')
    plt.plot([0,1],[0,1],color='black',linestyle='--')  # chance line from (0,0) to (1,1)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC(Receiver Operating Curve)')
plt.legend()
plt.show()
probs=classifier.predict_proba(x_test)
probs
auc=roc_auc_score(y_test,probs)
print('AUC:%.2f'%auc)
fpr,tpr,thresholds=roc_curve(y_test,probs)
plot_roc_curve(fpr,tpr)
###Output
_____no_output_____ |
data/lec1_1_Basics_of_DataFrame_and_Series_ForOnlineLecture.ipynb | ###Markdown
**Pandas version 0.25.1 (`pip install pandas==0.25.1`)** `Series` Data type - NumPy's ndarray + an index of non-numeric types (e.g. strings)
###Code
import pandas as pd
a = pd.Series([1,2,3,4])
a
# Method 1
s2 = pd.Series(
[1, 2, 3, 4],
index=['a', 'b', 'c', 'd']
)
s2
s2.head(2)
# Method 2
s2 = pd.Series({'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5})
s2.head()
###Output
_____no_output_____
###Markdown
- A Series can only hold a single data type Functions related to `nan`
###Code
import numpy as np
np.nan
s = pd.Series([10, 0, 1, 1, 2, 3, 4, 5, 6, np.nan])
s
len(s)
s.shape
s.count() # not count `nan`
s.unique()
# Not covered in class, but nunique() returns the total number of unique values.
# s.nunique()
s.value_counts()
###Output
_____no_output_____
###Markdown
- The remaining functions will each be covered in later lessons! Operations between Series align on index labels - The operation proceeds by automatically aligning index labels, not by the 'order' of the data!
###Code
s3 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
s4 = pd.Series([4, 3, 2, 1], index=['d', 'c', 'b', 'a'])
s3 + s4
###Output
_____no_output_____
###Markdown
`DataFrame` Data type - A data type built to manage multiple Series in a single variable - You can think of it as a dict of Series - `{'column_name1': Series1, 'column_name2': Series2}` - Each Series forms a column of the DataFrame - Naturally, the Series making up a DataFrame all share the same index! => a common index is used Various ways to create a DataFrame
###Code
s1 = np.arange(1, 6, 1)
s2 = np.arange(6, 11, 1)
s1
s2
df = pd.DataFrame(
{
'c1': s1,
'c2': s2
}
)
df
# Method 1 (default index and columns will be set)
pd.DataFrame(
[
[10,11],
[10,12]
]
)
pd.DataFrame(
np.array(
[
[10, 11],
[20, 21]
]
)
)
# Method 2 (rarely used)
pd.DataFrame(
[
pd.Series(np.arange(10, 15)), # doesn't have to be a Series; any list-like (iterable) object works
pd.Series(np.arange(15, 20)), # doesn't have to be a Series; any list-like (iterable) object works
]
)
pd.DataFrame(
[
np.arange(10, 15),
np.arange(15, 20),
]
)
# Method 3 (with column & index names)
pd.DataFrame(
np.array(
[
[10, 11],
[20, 21]
]
),
columns=['a', 'b'],
index=['r1', 'r2']
)
# Method 4
s1 = pd.Series(np.arange(1, 6, 1)) # doesn't have to be a Series; any list-like (iterable) object works
s2 = pd.Series(np.arange(6, 11, 1)) # doesn't have to be a Series; any list-like (iterable) object works
pd.DataFrame(
{
'c1': [1,2,3], # a list, np.array, or Series can all be used here!
'c2': [4,5,6]
}
)
# Note: even when creating a one-row DataFrame, the dictionary values must be iterable data types (e.g. list, np.array, Series)
pd.DataFrame({'c1': [0], 'c2': [1]})
s1 = pd.Series(np.arange(1, 6, 1), index=['a', 'b', 'c', 'd', 'e'])
s2 = pd.Series(np.arange(6, 11, 1), index=['b', 'c', 'd', 'f', 'g'])
df = pd.DataFrame(
{
'c1': s1,
'c2': s2
}
)
###Output
_____no_output_____
###Markdown
When creating a DataFrame, the Series are automatically aligned by index!
###Code
s1 = pd.Series(np.arange(1, 6, 1))
s2 = pd.Series(np.arange(6, 11, 1))
s3 = pd.Series(np.arange(12, 15), index=[1, 2, 10]) # this one has index values unlike s1, s2
s1
s2
s3
df = pd.DataFrame({'c1': s1, 'c2': s2, 'c3': s3})
df
###Output
_____no_output_____
###Markdown
Adding a new column to a DataFrame
###Code
# assigning to a new column label works just like adding a key to a dict (e.g. my_dict['a'] = 1):
df['c4'] = pd.Series([1,2,3,4], index=[0, 1, 2, 10])
df
###Output
_____no_output_____
###Markdown
Reindexing - Rearranging data based on new index labels while keeping the existing "index-value" mapping For reference: replacing the index itself (which breaks the "index-value" mapping)
###Code
s = pd.Series([1,2,3,4,5])
s
s.index = ['a', 'b', 'c', 'd', 'e']
s
###Output
_____no_output_____
###Markdown
Note: `set_index()`: turns a specific column into the index
###Code
# This is the dataframe defined above in the 'Series are automatically aligned by index' section
df
df['c5'] = pd.Series([1,2,3,4,5,6], index=[0,1,2,3,4,10])
df
df.set_index("c5")
###Output
_____no_output_____
###Markdown
Reindex
###Code
s2 = s.reindex(
['a', 'c', 'e', 'g']
)
s2
# Copied
s2['a'] = 0
s2
# s is untouched even though s2's value was changed
s
# [X] Don't do this: the index dtypes don't match, so the addition below yields all NaN
s1 = pd.Series([0, 1, 2], index=[0, 1, 2])
s2 = pd.Series([3, 4, 5], index=['0', '1', '2'])
s1
s2
s1 + s2
s1.index
s2 = s2.reindex(s1.index)
s2
# Method 1
s1 = pd.Series([0, 1, 2], index=[0, 1, 2])
s2 = pd.Series([3, 4, 5], index=['0', '1', '2'])
s2.index = s2.index.astype(int)
s2
s2.index
s1 + s2
# Method 2
s1 = pd.Series([0, 1, 2], index=[0, 1, 2])
s2 = pd.Series([3, 4, 5], index=['0', '1', '2'])
s1.index = ['a', 'b', 'c']
s2.index = ['a', 'b', 'c']
s1 + s2
###Output
_____no_output_____
###Markdown
Useful arguments of `reindex()` - `fill_value`
###Code
s2 = s.copy()
s2
s2.reindex(['a', 'f'])
s2.reindex(['a', 'f'], fill_value=0) # fill 0 instead of NaN
###Output
_____no_output_____
###Markdown
- `method`
###Code
s3 = pd.Series(['red', 'green', 'blue'], index=[0, 3, 5])
s3
s3.reindex(np.arange(0,7))
s3.reindex(np.arange(0,7), method='ffill')
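# aside (not in the original lecture): method='bfill' fills from the next valid label instead
# s3.reindex(np.arange(0, 7), method='bfill')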
###Output
_____no_output_____
###Markdown
Example
###Code
# If you installed the libraries using requirements.txt in the very first lecture, you can skip this cell.
!pip install finance_datareader==0.9.1
import FinanceDataReader as fdr
# Samsung Electronics
df1 = fdr.DataReader("005930", '2018-01-02', '2018-10-30')
# KODEX 200 (ETF)
df2 = fdr.DataReader("069500", '2018-01-03', '2018-10-30')
df1.head(2)
df1.tail(2)
df2.head(2)
df2.tail(2)
# Samsung Electronics
df1 = fdr.DataReader("005930", '2018-01-02', '2018-10-30')
# KODEX 200 (ETF)
df2 = fdr.DataReader("069500", '2018-01-02', '2018-10-30')
df1.shape
df2.shape
df2 = df2.drop(pd.to_datetime("2018-01-03"))
df2.head()
df1.head()
new_df2 = df2.reindex(df1.index)
new_df2.head()
df1.shape
new_df2.shape
new_df2.fillna(method="ffill")
###Output
_____no_output_____ |
pastis/temporal_analysis/harris_mode-medium.ipynb | ###Markdown
Define and create directory
###Code
root_dir = "/Users/asahoo/Desktop/data_repos/harris_data"
repo_dir = "/Users/asahoo/repos/PASTIS"
coronagraph_design = 'medium'
overall_dir = util.create_data_path(root_dir, telescope='luvoir_'+coronagraph_design)
resDir = os.path.join(overall_dir, 'matrix_numerical')
print(resDir)
# Create necessary directories if they don't exist yet
os.makedirs(resDir, exist_ok=True)
os.makedirs(os.path.join(resDir, 'OTE_images'), exist_ok=True)
os.makedirs(os.path.join(resDir, 'psfs'), exist_ok=True)
###Output
_____no_output_____
###Markdown
Read from configfile
###Code
nb_seg = CONFIG_PASTIS.getint('LUVOIR', 'nb_subapertures')
wvln = CONFIG_PASTIS.getfloat('LUVOIR', 'lambda') * 1e-9 # m (this value doesn't matter here; luvoir.wvln is what gets used)
diam = CONFIG_PASTIS.getfloat('LUVOIR', 'diameter') # m
nm_aber = CONFIG_PASTIS.getfloat('LUVOIR', 'calibration_aberration') * 1e-9 # m
sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling')
optics_path_in_repo = CONFIG_PASTIS.get('LUVOIR', 'optics_path_in_repo')
aper_path = CONFIG_PASTIS.get('LUVOIR','aperture_path_in_optics')
aper_ind_path = CONFIG_PASTIS.get('LUVOIR', 'indexed_aperture_path_in_optics')
aper_read = hcipy.read_fits(os.path.join(repo_dir,optics_path_in_repo,aper_path))
aper_ind_read = hcipy.read_fits(os.path.join(repo_dir,optics_path_in_repo,aper_ind_path))
z_pup_downsample = CONFIG_PASTIS.getfloat('numerical', 'z_pup_downsample')
###Output
_____no_output_____
###Markdown
Load aperture files to make segmented mirror
###Code
pupil_grid = hcipy.make_pupil_grid(dims=aper_ind_read.shape[0], diameter=15)
aper = hcipy.Field(aper_read.ravel(), pupil_grid)
aper_ind = hcipy.Field(aper_ind_read.ravel(), pupil_grid)
wf_aper = hcipy.Wavefront(aper, wvln)
# Load segment positions from fits header
hdr = fits.getheader(os.path.join(repo_dir,optics_path_in_repo,aper_ind_path))
poslist = []
for i in range(nb_seg):
segname = 'SEG' + str(i+1)
xin = hdr[segname + '_X']
yin = hdr[segname + '_Y']
poslist.append((xin, yin))
poslist = np.transpose(np.array(poslist))
seg_pos = hcipy.CartesianGrid(hcipy.UnstructuredCoords(poslist))
plt.figure(figsize=(20,10))
plt.subplot(2,3,1)
plt.title("pupil_grid")
plt.plot(pupil_grid.x, pupil_grid.y, '+')
plt.xlabel('x')
plt.ylabel('y')
plt.subplot(2,3,2)
plt.title("aper")
hcipy.imshow_field(aper)
plt.tick_params(top=False, bottom=False, left=False, right=False,
labelleft=False, labelbottom=False)
plt.colorbar()
plt.subplot(2,3,3)
plt.title("aper_ind")
hcipy.imshow_field(aper_ind)
plt.colorbar()
plt.subplot(2,3,4)
plt.title("wf_aper.phase")
hcipy.imshow_field(wf_aper.phase)
plt.colorbar()
plt.subplot(2,3,5)
plt.title("wf_aper.amplitude")
hcipy.imshow_field(wf_aper.amplitude)
plt.colorbar()
plt.subplot(2,3,6)
plt.title("seg_pos")
plt.plot(seg_pos.x, seg_pos.y, '+')
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar()
###Output
_____no_output_____
###Markdown
Instantiate LUVOIR
###Code
optics_input = os.path.join(util.find_repo_location(), CONFIG_PASTIS.get('LUVOIR', 'optics_path_in_repo'))
luvoir = LuvoirA_APLC(optics_input, coronagraph_design, sampling)
hcipy.imshow_field(luvoir.apodizer)
hcipy.imshow_field(luvoir.fpm)
N_pup_z = np.int(luvoir.pupil_grid.shape[0] / z_pup_downsample) #N_pup_z = 100
grid_zernike = hcipy.field.make_pupil_grid(N_pup_z, diameter=luvoir.diam)
plt.figure(figsize=(10,10))
plt.title("grid_zernike") #hcipy cartesian grid
plt.plot(grid_zernike.x, grid_zernike.y, '+')
plt.xlabel('x')
plt.ylabel('y')
###Output
_____no_output_____
###Markdown
load thermal modes files
###Code
filepath = "/Users/asahoo/repos/PASTIS/Jupyter Notebooks/LUVOIR/Sensitivities2.xlsx"
pad_orientation = np.pi/2*np.ones(nb_seg)
#pad_orientation = np.zeros(nb_seg)
###Output
_____no_output_____
###Markdown
create harris deformabale mirror
###Code
luvoir.create_segmented_harris_mirror(filepath,pad_orientation, thermal = True,mechanical=False,other=False)
luvoir.harris_sm
###Output
_____no_output_____
###Markdown
creating single segment
###Code
segment = hcipy.hexagonal_aperture(luvoir.segment_circumscribed_diameter, np.pi/2) #function
segment_sampled = hcipy.evaluate_supersampled(segment,luvoir.pupil_grid, 10) #hcipy field
###Output
_____no_output_____
###Markdown
creating nb_seg segments
###Code
aper2, segs2 = hcipy.make_segmented_aperture(segment,luvoir.seg_pos, segment_transmissions=1, return_segments=True)
luvoir_segmented_pattern = hcipy.evaluate_supersampled(aper2, luvoir.pupil_grid, 10)
seg_evaluated = []
for seg_tmp in segs2:
tmp_evaluated = hcipy.evaluate_supersampled(seg_tmp, luvoir.pupil_grid, 1)
seg_evaluated.append(tmp_evaluated)
###Output
_____no_output_____
###Markdown
Plotting Harris_mode
###Code
df = pd.read_excel(filepath)
valuesA = np.asarray(df.a)
valuesB = np.asarray(df.b)
valuesC = np.asarray(df.c)
valuesD = np.asarray(df.d)
valuesE = np.asarray(df.e)
valuesF = np.asarray(df.f)
valuesG = np.asarray(df.g)
valuesH = np.asarray(df.h)
valuesI = np.asarray(df.i)
valuesJ = np.asarray(df.j)
valuesK = np.asarray(df.k)
seg_x = np.asarray(df.X)
seg_y = np.asarray(df.Y)
harris_seg_diameter = np.max([np.max(seg_x) - np.min(seg_x), np.max(seg_y) - np.min(seg_y)])
pup_dims = luvoir.pupil_grid.dims
x_grid = np.asarray(df.X) * luvoir.segment_circumscribed_diameter /harris_seg_diameter
y_grid = np.asarray(df.Y) * luvoir.segment_circumscribed_diameter /harris_seg_diameter
points = np.transpose(np.asarray([x_grid, y_grid]))
seg_evaluated = luvoir._create_evaluated_segment_grid()
def _transform_harris_mode(values, xrot, yrot, points, seg_evaluated, seg_num):
""" Take imported Harris mode data and transform into a segment mode on our aperture. """
zval = griddata(points, values, (xrot, yrot), method='linear')
zval[np.isnan(zval)] = 0
zval = zval.ravel() * seg_evaluated[seg_num]
return zval
harris_base_thermal = []
for seg_num in range(0, luvoir.nseg):
grid_seg = luvoir.pupil_grid.shifted(-luvoir.seg_pos[seg_num])
x_line_grid = np.asarray(grid_seg.x)
y_line_grid = np.asarray(grid_seg.y)
# Rotate the modes grids according to the orientation of the mounting pads
phi = pad_orientation[seg_num]
x_rotation = x_line_grid * np.cos(phi) + y_line_grid * np.sin(phi)
y_rotation = -x_line_grid * np.sin(phi) + y_line_grid * np.cos(phi)
# Transform all needed Harris modes from data to modes on our segmented aperture
ZA = _transform_harris_mode(valuesA, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZB = _transform_harris_mode(valuesB, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZC = _transform_harris_mode(valuesC, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZD = _transform_harris_mode(valuesD, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZE = _transform_harris_mode(valuesE, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZF = _transform_harris_mode(valuesF, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZG = _transform_harris_mode(valuesG, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZH = _transform_harris_mode(valuesH, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZI = _transform_harris_mode(valuesI, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZJ = _transform_harris_mode(valuesJ, x_rotation, y_rotation, points, seg_evaluated, seg_num)
ZK = _transform_harris_mode(valuesK, x_rotation, y_rotation, points, seg_evaluated, seg_num)
harris_base_thermal.append([ZA, ZB, ZC, ZD, ZE, ZF, ZG, ZH, ZI, ZJ, ZK])
###Output
_____no_output_____
###Markdown
Flatten all DMs and create unaberrated reference PSF
###Code
n_harris = luvoir.harris_sm.num_actuators # int: 5 modes * 120 segments = 600
harris_mode = np.zeros(n_harris)
luvoir.harris_sm.actuators = harris_mode # set all actuators to zero
###Output
_____no_output_____
###Markdown
Calculate the unaberrated coro and direct PSFs in INTENSITY
###Code
unaberrated_coro_psf, ref = luvoir.calc_psf(ref=True, display_intermediate=False, norm_one_photon=True)
plt.figure(figsize=(15,5))
plt.subplot(1,2,1)
plt.title("unaberrated_coro_psf")
hcipy.imshow_field(np.log(np.abs(unaberrated_coro_psf)))
plt.colorbar()
plt.subplot(1,2,2)
plt.title("ref")
hcipy.imshow_field(np.log(np.abs(ref)))
plt.colorbar()
norm = np.max(ref)
print(norm)
dh_intensity = (unaberrated_coro_psf / norm) * luvoir.dh_mask
contrast_floor = np.mean(dh_intensity[np.where(luvoir.dh_mask != 0)])
print(f'contrast floor: {contrast_floor}')
hcipy.imshow_field(dh_intensity)
plt.title("dh_intensity")
###Output
_____no_output_____
###Markdown
Calculate the unaberrated coro and direct PSFs in E-FIELDS
###Code
# Calculate the unaberrated coro and direct PSFs in E-FIELDS
nonaberrated_coro_psf, ref, efield = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate='efield',norm_one_photon=True)
Efield_ref = nonaberrated_coro_psf.electric_field
plt.figure(figsize=(25, 10))
plt.subplot(1,2,1)
hcipy.imshow_field(np.log(np.abs(nonaberrated_coro_psf.amplitude)))
plt.title("nonaberrated_coro_psf.amplitude")
plt.colorbar()
plt.subplot(1,2,2)
hcipy.imshow_field(np.log(np.abs(ref.amplitude)))
plt.title("ref.amplitude")
plt.colorbar()
print('Generating the E-fields for harris modes in science plane')
print(f'Calibration aberration used: {nm_aber} m')
start_time = time.time()
focus_fieldS = []
focus_fieldS_Re = []
focus_fieldS_Im = []
harris_mode = np.zeros(n_harris)
for pp in range(0, n_harris):
print(f'Working on mode {pp}/{n_harris}')
# Apply calibration aberration to used mode
harris_mode = np.zeros(n_harris)
harris_mode[pp] = (nm_aber)/2
luvoir.harris_sm.actuators = harris_mode
# Calculate coronagraphic E-field and add to lists
aberrated_coro_psf, inter = luvoir.calc_psf(display_intermediate=False, return_intermediate='efield',norm_one_photon=True)
focus_field1 = aberrated_coro_psf
focus_fieldS.append(focus_field1)
focus_fieldS_Re.append(focus_field1.real)
focus_fieldS_Im.append(focus_field1.imag)
plt.figure(figsize=(10, 10))
hcipy.imshow_field(np.log(np.abs(focus_fieldS[89].electric_field)))
plt.colorbar()
luvoir_test = LuvoirA_APLC(optics_input, coronagraph_design, sampling)
luvoir_test.create_segmented_harris_mirror(filepath,pad_orientation, thermal = True,mechanical=False,other=False)
luvoir_test.harris_sm
harris_mode = np.zeros(n_harris)
harris_mode[116] = nm_aber
luvoir_test.harris_sm.actuators = harris_mode
hcipy.imshow_field(((10*luvoir_test.harris_sm.surface+1e-8*luvoir_segmented_pattern)))
plt.colorbar()
###Output
_____no_output_____
###Markdown
Construct the PASTIS matrix from the E-fields
###Code
mat_fast = np.zeros([n_harris, n_harris]) # create empty matrix
for i in range(0, n_harris):
for j in range(0, n_harris):
test = np.real((focus_fieldS[i].electric_field - Efield_ref) * np.conj(focus_fieldS[j].electric_field - Efield_ref))
dh_test = (test / norm) * luvoir.dh_mask
contrast = np.mean(dh_test[np.where(luvoir.dh_mask != 0)])
mat_fast[i, j] = contrast
matrix_pastis = np.copy(mat_fast)
matrix_pastis /= np.square(nm_aber * 1e9)
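# normalisation note: each mode was poked with actuator amplitude nm_aber/2 (metres) above,
# so dividing by (nm_aber * 1e9)**2 expresses the matrix as contrast per nm^2 of aberration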
plt.figure(figsize=(15,5))
#plt.subplot(1,2,1)
plt.imshow(np.log(np.abs(mat_fast)))
#plt.title("PASTIS matrix")
#plt.savefig('/Users/asahoo/Desktop/P_matrix.png')
#plt.colorbar()
# plt.subplot(1,2,2)
# plt.imshow(np.log(np.abs(matrix_pastis)))
# plt.title("np.log(np.abs(matrix_pastis))")
# plt.colorbar()
filename_matrix = 'PASTISmatrix_n_harris_' + str(n_harris)
hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits'))
print('Matrix saved to:', os.path.join(resDir, filename_matrix + '.fits'))
filename_matrix = 'EFIELD_Re_matrix_n_harris_' + str(n_harris)
hcipy.write_fits(focus_fieldS_Re, os.path.join(resDir, filename_matrix + '.fits'))
print('Efield Real saved to:', os.path.join(resDir, filename_matrix + '.fits'))
filename_matrix = 'EFIELD_Im_matrix_n_harris_' + str(n_harris)
hcipy.write_fits(focus_fieldS_Im, os.path.join(resDir, filename_matrix + '.fits'))
print('Efield Imag saved to:', os.path.join(resDir, filename_matrix + '.fits'))
end_time = time.time()
print('Runtime for harris modes:', end_time - start_time, 'sec =', (end_time - start_time) / 60, 'min')
print('Data saved to {}'.format(resDir))
###Output
_____no_output_____
###Markdown
error analysis
###Code
evals, evecs = np.linalg.eig(matrix_pastis)
sorted_evals = np.sort(evals)
sorted_indices = np.argsort(evals)
sorted_evecs = evecs[:, sorted_indices]
plt.figure(figsize=(10, 10))
#plt.plot(evals, label='Unsorted from eigendecomposition')
plt.plot(sorted_evals)
plt.semilogy()
plt.xlabel('Mode Index')
plt.ylabel('Sensitivity of contrast for each mode')
plt.tick_params(top=True, bottom=True, left=True, right=True,
labelleft=True, labelbottom=True)
#plt.legend()
c_target_log = -11
c_target = 10**(c_target_log)
n_repeat = 20
mu_map_harris = np.sqrt(((c_target) / (n_harris)) / (np.diag(matrix_pastis)))
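# budget relations implemented here and a few cells below (read off the code, PASTIS convention):
#   per-mode tolerance       mu_p    = sqrt((c_target / n_harris) / M_pp),  M_pp = diag(matrix_pastis)
#   per-eigenmode tolerance  sigma_k = sqrt(c_target / (n_harris * lambda_k))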
plt.figure(figsize=(20,5))
plt.title("Segment-based PASTIS constraints from PASTIS matrix and PASTIS modes")
plt.plot(mu_map_harris)
sigma = np.sqrt(c_target / (n_harris * sorted_evals))  # n_harris (= 600) modes in the budget
plt.figure(figsize=(20,5))
plt.title("Max mode contribution(s) from the static-contrast target and eigen values")
plt.plot(sigma)
cont_cum_pastis = []
for maxmode in range(sorted_evecs.shape[0]):
aber = np.nansum(sorted_evecs[:, :maxmode+1] * sigma[:maxmode+1], axis=1)
aber *= u.nm
contrast_matrix = util.pastis_contrast(aber, matrix_pastis) + contrast_floor
cont_cum_pastis.append(contrast_matrix)
plt.figure(figsize=(10,10))
plt.plot(cont_cum_pastis)
plt.xlabel("modes")
plt.ylabel("List of cumulative contrast")
cont_ind_pastis = []
for maxmode in range(sorted_evecs.shape[0]):
aber = sorted_evecs[:, maxmode] * sigma[maxmode]
aber *=u.nm
contrast_matrix = util.pastis_contrast(aber, matrix_pastis)
cont_ind_pastis.append(contrast_matrix)
plt.figure(figsize=(20,10))
plt.plot((cont_ind_pastis))
plt.xlabel("modes")
plt.ylabel("List of Individual contrast")
plt.yscale('log')
npup = np.int(np.sqrt(luvoir.pupil_grid.x.shape[0]))
nimg = np.int(np.sqrt(luvoir.focal_det.x.shape[0]))
# Getting the flux together
sptype = 'A0V' # Put this on config
Vmag = 0.0 # Put this in loop
minlam = 500 * u.nanometer # Put this on config
maxlam = 600 * u.nanometer # Put this on config
star_flux = exoscene.star.bpgs_spectype_to_photonrate(spectype=sptype, Vmag=Vmag, minlam=minlam.value, maxlam=maxlam.value)
Nph = star_flux.value*15**2*np.sum(luvoir.apodizer**2) / npup**2
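# rough reading of this photon budget (an interpretation, not stated in the notebook):
# star_flux is a photon rate per unit collecting area, 15**2 scales by the square of the
# 15 m aperture diameter, and sum(apodizer**2)/npup**2 is the mean apodizer throughput per pixel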
dark_current = 0 #0.000072 #es per s
CIC = 0. #0.00076 #electrons per sec
harris_mode = np.zeros(n_harris)
luvoir.harris_sm.actuators = harris_mode
nonaberrated_coro_psf, refshit,inter_ref = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate='efield',norm_one_photon=True)
Efield_ref = nonaberrated_coro_psf.electric_field
harris_mode = np.zeros(n_harris)
luvoir.harris_sm.actuators = harris_mode
harris_ref2 = luvoir.calc_out_of_band_wfs(norm_one_photon=True)
harris_ref2_sub_real = hcipy.field.subsample_field(harris_ref2.real, z_pup_downsample, grid_zernike, statistic='mean')
harris_ref2_sub_imag = hcipy.field.subsample_field(harris_ref2.imag, z_pup_downsample, grid_zernike, statistic='mean')
Efield_ref_OBWFS = (harris_ref2_sub_real + 1j*harris_ref2_sub_imag) * z_pup_downsample
nyquist_sampling = 2.
# Actual grid for LUVOIR images
grid_test = hcipy.make_focal_grid(
luvoir.sampling,
luvoir.imlamD,
pupil_diameter=luvoir.diam,
focal_length=1,
reference_wavelength=luvoir.wvln,
)
# Actual grid for LUVOIR images that are nyquist sampled
grid_det_subsample = hcipy.make_focal_grid(
nyquist_sampling,
np.floor(luvoir.imlamD),
pupil_diameter=luvoir.diam,
focal_length=1,
reference_wavelength=luvoir.wvln,
)
n_nyquist = np.int(np.sqrt(grid_det_subsample.x.shape[0]))
### Dark hole mask
design = 'medium'
dh_outer_nyquist = hcipy.circular_aperture(2 * luvoir.apod_dict[design]['owa'] * luvoir.lam_over_d)(grid_det_subsample)
dh_inner_nyquist = hcipy.circular_aperture(2 * luvoir.apod_dict[design]['iwa'] * luvoir.lam_over_d)(grid_det_subsample)
dh_mask_nyquist = (dh_outer_nyquist - dh_inner_nyquist).astype('bool')
dh_size = len(np.where(luvoir.dh_mask != 0)[0])
dh_size_nyquist = len(np.where(dh_mask_nyquist != 0)[0])
dh_index = np.where(luvoir.dh_mask != 0)[0]
dh_index_nyquist = np.where(dh_mask_nyquist != 0)[0]
# E0_LOWFS = np.zeros([N_pup_z*N_pup_z,1,2])
# E0_LOWFS[:,0,0] = Efield_ref_LOWFS.real
# E0_LOWFS[:,0,1] = Efield_ref_LOWFS.imag
E0_OBWFS = np.zeros([N_pup_z*N_pup_z,1,2])
E0_OBWFS[:,0,0] = Efield_ref_OBWFS.real
E0_OBWFS[:,0,1] = Efield_ref_OBWFS.imag
E0_coron = np.zeros([nimg*nimg,1,2])
E0_coron[:,0,0] = Efield_ref.real
E0_coron[:,0,1] = Efield_ref.imag
E0_coron_nyquist = np.zeros([n_nyquist*n_nyquist,1,2])
tmp0 = hcipy.interpolation.make_linear_interpolator_separated(Efield_ref, grid=grid_test)
Efield_ref_nyquist = (luvoir.sampling/nyquist_sampling)**2*tmp0(grid_det_subsample)
E0_coron_nyquist[:,0,0] = Efield_ref_nyquist.real
E0_coron_nyquist[:,0,1] = Efield_ref_nyquist.imag
E0_coron_DH = np.zeros([dh_size,1,2])
E0_coron_DH[:,0,0] = Efield_ref.real[dh_index]
E0_coron_DH[:,0,1] = Efield_ref.imag[dh_index]
E0_coron_DH_nyquist = np.zeros([dh_size_nyquist,1,2])
E0_coron_DH_nyquist[:,0,0] = Efield_ref_nyquist.real[dh_index_nyquist]
E0_coron_DH_nyquist[:,0,1] = Efield_ref_nyquist.imag[dh_index_nyquist]
filename_matrix = 'EFIELD_Re_matrix_n_harris_' + str(n_harris) + '.fits'
G_harris_real = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))
filename_matrix = 'EFIELD_Im_matrix_n_harris_' + str(n_harris) + '.fits'
G_harris_imag = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))
G_coron_harris_nyquist= np.zeros([n_nyquist*n_nyquist,2,n_harris])
for pp in range(0, n_harris):
tmp0 = G_harris_real[pp] + 1j*G_harris_imag[pp]
tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)
tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)
G_coron_harris_nyquist[:,0,pp] = tmp2.real - Efield_ref_nyquist.real
G_coron_harris_nyquist[:,1,pp] = tmp2.imag - Efield_ref_nyquist.imag
G_coron_harris_DH= np.zeros([dh_size,2,n_harris])
for pp in range(0, n_harris):
G_coron_harris_DH[:,0,pp] = G_harris_real[pp,dh_index] - Efield_ref.real[dh_index]
G_coron_harris_DH[:,1,pp] = G_harris_imag[pp,dh_index] - Efield_ref.imag[dh_index]
G_coron_harris_DH_nyquist= np.zeros([dh_size_nyquist,2,n_harris])
for pp in range(0, n_harris):
tmp0 = G_harris_real[pp] + 1j*G_harris_imag[pp]
tmp1 = hcipy.interpolation.make_linear_interpolator_separated(tmp0, grid=grid_test)
tmp2 = (luvoir.sampling/nyquist_sampling)**2*tmp1(grid_det_subsample)
G_coron_harris_DH_nyquist[:,0,pp] = tmp2.real[dh_index_nyquist] - Efield_ref_nyquist.real[dh_index_nyquist]
G_coron_harris_DH_nyquist[:,1,pp] = tmp2.imag[dh_index_nyquist] - Efield_ref_nyquist.imag[dh_index_nyquist]
G_coron_harris= np.zeros([nimg*nimg,2,n_harris])
for pp in range(0, n_harris):
G_coron_harris[:,0,pp] = G_harris_real[pp] - Efield_ref.real
G_coron_harris[:,1,pp] = G_harris_imag[pp] - Efield_ref.imag
start_time = time.time()
focus_fieldS = []
focus_fieldS_Re = []
focus_fieldS_Im = []
for pp in range(0, n_harris):
print(pp)
harris_modes = np.zeros(n_harris)
harris_modes[pp] = (nm_aber) / 2
luvoir.harris_sm.actuators = harris_modes
harris_meas = luvoir.calc_out_of_band_wfs(norm_one_photon=True)
harris_meas_sub_real = hcipy.field.subsample_field(harris_meas.real, z_pup_downsample, grid_zernike, statistic='mean')
harris_meas_sub_imag = hcipy.field.subsample_field(harris_meas.imag, z_pup_downsample, grid_zernike, statistic='mean')
focus_field1 = harris_meas_sub_real + 1j * harris_meas_sub_imag
focus_fieldS.append(focus_field1)
focus_fieldS_Re.append(focus_field1.real)
focus_fieldS_Im.append(focus_field1.imag)
filename_matrix = 'EFIELD_OBWFS_Re_matrix_num_harris_' + str(n_harris)
hcipy.write_fits(focus_fieldS_Re, os.path.join(resDir, filename_matrix + '.fits'))
print('Efield Real saved to:', os.path.join(resDir, filename_matrix + '.fits'))
filename_matrix = 'EFIELD_OBWFS_Im_matrix_num_harris_' + str(n_harris)
hcipy.write_fits(focus_fieldS_Im, os.path.join(resDir, filename_matrix + '.fits'))
print('Efield Imag saved to:', os.path.join(resDir, filename_matrix + '.fits'))
filename_matrix = 'EFIELD_OBWFS_Re_matrix_num_harris_' + str(n_harris)+'.fits'
G_OBWFS_real = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))
filename_matrix = 'EFIELD_OBWFS_Im_matrix_num_harris_' + str(n_harris)+'.fits'
G_OBWFS_imag = fits.getdata(os.path.join(overall_dir, 'matrix_numerical', filename_matrix))
G_OBWFS= np.zeros([N_pup_z*N_pup_z,2,n_harris])
for pp in range(0, n_harris):
G_OBWFS[:,0,pp] = G_OBWFS_real[pp]*z_pup_downsample - Efield_ref_OBWFS.real
G_OBWFS[:,1,pp] = G_OBWFS_imag[pp]*z_pup_downsample - Efield_ref_OBWFS.imag
def req_closedloop_calc_recursive(Gcoro, Gsensor, E0coro, E0sensor, Dcoro, Dsensor, t_exp, flux, Q, Niter, dh_mask,
norm):
P = np.zeros(Q.shape) # WFE modes covariance estimate
r = Gsensor.shape[2]
N = Gsensor.shape[0]
N_img = Gcoro.shape[0]
c = 1
# Iterations of ALGORITHM 1
contrast_hist = np.zeros(Niter)
intensity_WFS_hist = np.zeros(Niter)
cal_I_hist = np.zeros(Niter)
eps_hist = np.zeros([Niter, r])
averaged_hist = np.zeros(Niter)
contrasts = []
for pp in range(Niter):
eps = np.random.multivariate_normal(np.zeros(r), P + Q * t_exp).reshape((1, 1, r)) # random modes
G_eps = np.sum(Gsensor * eps, axis=2).reshape((N, 1, 2 * c)) + E0sensor # electric field
G_eps_squared = np.sum(G_eps * G_eps, axis=2, keepdims=True)
G_eps_G = np.matmul(G_eps, Gsensor)
G_eps_G_scaled = G_eps_G / np.sqrt(G_eps_squared + Dsensor / flux / t_exp) # trick to save RAM
cal_I = 4 * flux * t_exp * np.einsum("ijk,ijl->kl", G_eps_G_scaled, G_eps_G_scaled) # information matrix
P = np.linalg.inv(np.linalg.inv(P + Q * t_exp / 2) + cal_I)
# P = np.linalg.inv(cal_I)
# Coronagraph
G_eps_coron = np.sum(Gcoro * eps, axis=2).reshape((N_img, 1, 2 * c)) + E0coro
G_eps_coron_squared = np.sum(G_eps_coron * G_eps_coron, axis=2, keepdims=True)
intensity = G_eps_coron_squared * flux * t_exp + Dcoro
# Wavefront sensor
intensity_WFS = G_eps_squared * flux * t_exp + Dsensor
# Archive
test_DH0 = intensity[:, 0, 0] * luvoir.dh_mask
test_DH = np.mean(test_DH0[np.where(test_DH0 != 0)])
contrasts.append(test_DH / flux / t_exp / norm)
intensity_WFS_hist[pp] = np.sum(intensity_WFS) / flux
cal_I_hist[pp] = np.mean(cal_I) / flux
eps_hist[pp] = eps
averaged_hist[pp] = np.mean(contrasts)
# print("est. contrast", np.mean(contrasts))
outputs = {'intensity_WFS_hist': intensity_WFS_hist,
'cal_I_hist': cal_I_hist,
'eps_hist': eps_hist,
'averaged_hist': averaged_hist,
'contrasts': contrasts}
return outputs
def req_closedloop_calc_batch(Gcoro, Gsensor, E0coro, E0sensor, Dcoro, Dsensor, t_exp, flux, Q, Niter, dh_mask, norm):
P = np.zeros(Q.shape) # WFE modes covariance estimate
r = Gsensor.shape[2]
N = Gsensor.shape[0]
N_img = Gcoro.shape[0]
c = 1
# Iterations of ALGORITHM 1
contrast_hist = np.zeros(Niter)
intensity_WFS_hist = np.zeros(Niter)
cal_I_hist = np.zeros(Niter)
eps_hist = np.zeros([Niter, r])
averaged_hist = np.zeros(Niter)
contrasts = []
for pp in range(Niter):
eps = np.random.multivariate_normal(np.zeros(r), P + Q * t_exp).reshape((1, 1, r)) # random modes
G_eps = np.sum(Gsensor * eps, axis=2).reshape((N, 1, 2 * c)) + E0sensor # electric field
G_eps_squared = np.sum(G_eps * G_eps, axis=2, keepdims=True)
G_eps_G = np.matmul(G_eps, Gsensor)
G_eps_G_scaled = G_eps_G / np.sqrt(G_eps_squared + Dsensor / flux / t_exp) # trick to save RAM
cal_I = 4 * flux * t_exp * np.einsum("ijk,ijl->kl", G_eps_G_scaled, G_eps_G_scaled) # information matrix
# P = np.linalg.inv(np.linalg.inv(P+Q*t_exp/2) + cal_I)
P = np.linalg.pinv(cal_I)
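# batch estimator: P is rebuilt from the current information matrix alone, unlike
# req_closedloop_calc_recursive above, which blends cal_I with the running covariance estimate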
# Coronagraph
G_eps_coron = np.sum(Gcoro * eps, axis=2).reshape((N_img, 1, 2 * c)) + E0coro
G_eps_coron_squared = np.sum(G_eps_coron * G_eps_coron, axis=2, keepdims=True)
intensity = G_eps_coron_squared * flux * t_exp + Dcoro
# Wavefront sensor
intensity_WFS = G_eps_squared * flux * t_exp + Dsensor
# Archive
test_DH0 = intensity[:, 0, 0] * luvoir.dh_mask
test_DH = np.mean(test_DH0[np.where(test_DH0 != 0)])
contrasts.append(test_DH / flux / t_exp / norm)
intensity_WFS_hist[pp] = np.sum(intensity_WFS) / flux
cal_I_hist[pp] = np.mean(cal_I) / flux
eps_hist[pp] = eps
averaged_hist[pp] = np.mean(contrasts)
# print("est. contrast", np.mean(contrasts))
# print("est. contrast", np.mean(contrasts))
outputs = {'intensity_WFS_hist': intensity_WFS_hist,
'cal_I_hist': cal_I_hist,
'eps_hist': eps_hist,
'averaged_hist': averaged_hist,
'contrasts': contrasts}
return outputs
flux = Nph
Qharris = np.diag(np.asarray(mu_map_harris**2))
# Running a bunch of tests for time series
Ntimes = 20
TimeMinus = -2
TimePlus = 3.5
Nwavescale = 8
WaveScaleMinus = -2
WaveScalePlus = 1
Nflux = 3
fluxPlus = 10
fluxMinus = 0
timeVec = np.logspace(TimeMinus,TimePlus,Ntimes)
WaveVec = np.logspace(WaveScaleMinus,WaveScalePlus,Nwavescale)
fluxVec = np.linspace(fluxMinus,fluxPlus,Nflux)
wavescaleVec = np.logspace(WaveScaleMinus,WaveScalePlus,Nwavescale)
niter = 10
print('harris modes with batch OBWFS and noise')
timer1 = time.time()
wavescale = 1.
StarMag = 1.0
result_1 = []
for tscale in np.logspace(TimeMinus, TimePlus, Ntimes):
Starfactor = 10**(-StarMag/2.5)
print(tscale)
tmp0 = req_closedloop_calc_batch(G_coron_harris, G_OBWFS, E0_coron, E0_OBWFS, dark_current+CIC/tscale,
dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*Qharris,
niter, luvoir.dh_mask, norm)
tmp1 = tmp0['averaged_hist']
n_tmp1 = len(tmp1)
result_1.append(tmp1[n_tmp1-1])
timer2 = time.time()
print(timer2 - timer1)
plt.figure(figsize=(35,10))
# plt.rcParams["font.size"] = 16
# plt.rcParams["axes.labelsize"] = 22
# plt.rcParams["axes.labelweight"] = "bold"
# plt.rcParams["xtick.labelsize"] = 16
# plt.rcParams["ytick.labelsize"] = 16
# plt.rcParams["legend.fontsize"] = 16
# plt.rcParams["figure.titlesize"] = 16
plt.subplot(1,3,1)
plt.title("Faceplates Silvered")
hcipy.imshow_field((nu_maps[0])*1e12, cmap = 'RdBu',vmin=-15, vmax=15)
cbar = plt.colorbar()
cbar.set_label("pm")
plt.subplot(1,3,2)
plt.title("Bulk")
hcipy.imshow_field((nu_maps[1])*1e12, cmap = 'RdBu',vmin=-25, vmax=5)
cbar = plt.colorbar()
cbar.set_label("pm")
plt.subplot(1,3,3)
plt.title("Gradient Radial")
hcipy.imshow_field((nu_maps[2])*1e12, cmap = 'RdBu',vmin=-15, vmax=15)
cbar = plt.colorbar()
cbar.set_label("pm")
plt.figure(figsize=(20,10))
plt.subplot(1,2,1)
plt.title("Segment Level 1mk gradient X lateral")
hcipy.imshow_field((nu_maps[3])*1e12, cmap = 'RdBu',vmin=-10, vmax=10)
cbar = plt.colorbar()
cbar.set_label("pm")
plt.subplot(1,2,2)
plt.title("Segment level 1mk gradient Z axial")
hcipy.imshow_field((nu_maps[4])*1e12,cmap = 'RdBu',vmin=-15, vmax=15)
cbar = plt.colorbar()
cbar.set_label("pm")
five_vec_1 = np.zeros(120)
five_vec_2 = np.zeros(120)
five_vec_3 = np.zeros(120)
five_vec_4 = np.zeros(120)
five_vec_5 = np.zeros(120)
j = -1
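# mu_map_harris interleaves the 5 Harris thermal modes segment by segment,
# so the stride-5 loop below pulls out one 120-segment vector per mode
# (assuming 120 segments x 5 modes = 600 entries in mu_map_harris).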
for i in range(0, 596, 5):
j = j+1
print("i---",i,"j---",j)
five_vec_1[j]=mu_map_harris[i]
five_vec_2[j]=mu_map_harris[i+1]
five_vec_3[j]=mu_map_harris[i+2]
five_vec_4[j]=mu_map_harris[i+3]
five_vec_5[j]=mu_map_harris[i+4]
luvoir2 = LuvoirA_APLC(optics_input, coronagraph_design, sampling)
luvoir2.create_segmented_mirror(1)
luvoir2.sm.actuators = five_vec_1
luvoir3 = LuvoirA_APLC(optics_input, coronagraph_design, sampling)
luvoir3.create_segmented_mirror(1)
luvoir3.sm.actuators = five_vec_2
luvoir4 = LuvoirA_APLC(optics_input, coronagraph_design, sampling)
luvoir4.create_segmented_mirror(1)
luvoir4.sm.actuators = five_vec_3
luvoir5 = LuvoirA_APLC(optics_input, coronagraph_design, sampling)
luvoir5.create_segmented_mirror(1)
luvoir5.sm.actuators = five_vec_4
luvoir6 = LuvoirA_APLC(optics_input, coronagraph_design, sampling)
luvoir6.create_segmented_mirror(1)
luvoir6.sm.actuators = five_vec_5
plt.figure(figsize =(35,10))
plt.subplot(1,3,1)
plt.title("Segment Level 1mk Faceplates Silvered")
hcipy.imshow_field((luvoir2.sm.surface)*1000, cmap = 'RdBu')
cbar = plt.colorbar()
cbar.set_label("mK")
plt.subplot(1,3,2)
plt.title("Segment Level 1mk bulk")
hcipy.imshow_field((luvoir3.sm.surface)*1000, cmap = 'RdBu')
cbar = plt.colorbar()
cbar.set_label("mK")
plt.subplot(1,3,3)
plt.title("Segment Level 1mk gradient radial")
hcipy.imshow_field((luvoir4.sm.surface)*1000, cmap = 'RdBu')
cbar = plt.colorbar()
cbar.set_label("mK")
plt.figure(figsize =(25,10))
plt.subplot(1,2,1)
plt.title("Segment Level 1mk gradient X lateral ")
hcipy.imshow_field((luvoir5.sm.surface)*1000, cmap = 'RdBu')
cbar = plt.colorbar()
cbar.set_label("mK")
plt.subplot(1,2,2)
plt.title("Segment level 1mk gradient Z axial")
hcipy.imshow_field((luvoir6.sm.surface)*1000, cmap = 'RdBu')
cbar = plt.colorbar()
cbar.set_label("mK")
five_vec_1?
# niter = 3
# print('harris modes with batch OBWFS and noise')
# timer1 = time.time()
# res = np.zeros([Ntimes, Nwavescale, Nflux, 1])
# pp = 0
# for tscale in np.logspace(TimeMinus, TimePlus, Ntimes):
# qq = 0
# print(tscale)
# for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):
# rr = 0
# for StarMag in np.linspace(fluxMinus, fluxPlus, Nflux):
# Starfactor = 10**(-StarMag/2.5)
# tmp0 = req_closedloop_calc_batch(G_coron_harris, G_OBWFS, E0_coron, E0_OBWFS, dark_current+CIC/tscale,
# dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*Qharris,
# niter, luvoir.dh_mask, norm)
# tmp1 = tmp0['averaged_hist']
# n_tmp1 = len(tmp1)
# res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor
# rr = rr + 1
# qq = qq + 1
# pp = pp + 1
# res_line = np.reshape(res,[Ntimes*Nwavescale*Nflux])
# text_files_name = os.path.join(overall_dir, f'harris_OBWFS_Batch_dark_{dark_current}_CIC_{CIC}.csv')
# np.savetxt(text_files_name, res_line, delimiter=",")
# timer2 = time.time()
# print(timer2 - timer1)
# print('harris modes with recursive OBWFS and noise')
# timer1 = time.time()
# res = np.zeros([Ntimes, Nwavescale, Nflux, 1])
# pp = 0
# for tscale in np.logspace(TimeMinus, TimePlus, Ntimes):
# qq = 0
# print(tscale)
# for wavescale in np.logspace(WaveScaleMinus, WaveScalePlus, Nwavescale):
# rr = 0
# for StarMag in np.linspace(fluxMinus,fluxPlus,Nflux):
# Starfactor = 10**(-StarMag/2.5)
# tmp0 = req_closedloop_calc_recursive(G_coron_harris, G_OBWFS, E0_coron, E0_OBWFS, dark_current+CIC/tscale,
# dark_current+CIC/tscale, tscale, flux*Starfactor, wavescale**2*Qharris,
# niter, luvoir.dh_mask, norm)
# tmp1 = tmp0['averaged_hist']
# n_tmp1 = len(tmp1)
# res[pp,qq,rr] = np.mean(tmp1[np.int(n_tmp1/2):n_tmp1]) - contrast_floor
# rr = rr + 1
# qq = qq + 1
# pp = pp + 1
luvoir2.sm.surface?
# wf_active_pupil = wf_aper
# wf_active_pupil = harris_sm(wf_active_pupil)
# wf_harris_sm = harris_sm(wf_aper)
# hcipy.imshow_field(wf_active_pupil.phase)
# hcipy.imshow_field(wf_harris_sm.phase)
# hcipy.imshow_field(wf_aper.phase)
# # All E-field propagations
# wf_dm1_coro = hcipy.Wavefront(wf_active_pupil.electric_field * np.exp(4 * 1j * np.pi/wvln * self.DM1), self.wavelength)
# wf_dm2_coro_before = fresnel(wf_dm1_coro)
# wf_dm2_coro_after = hcipy.Wavefront(wf_dm2_coro_before.electric_field * np.exp(4 * 1j * np.pi / self.wavelength * self.DM2) * self.DM2_circle, self.wavelength)
# wf_back_at_dm1 = self.fresnel_back(wf_dm2_coro_after)
# wf_apod_stop = hcipy.Wavefront(wf_back_at_dm1.electric_field * self.apod_stop, self.wavelength)
# wf_before_lyot = self.coro(wf_apod_stop)
# wf_lyot = self.lyot_stop(wf_before_lyot)
# wf_lyot.wavelength = self.wavelength
# wf_im_coro = self.prop(wf_lyot)
# wf_im_ref = self.prop(wf_back_at_dm1)
###Output
_____no_output_____ |
analysis/.ipynb_checkpoints/reweight_heatmaps_midway3-xxx-checkpoint.ipynb | ###Markdown
Load VMD and fix the PBCs manually by running the following Tk commands:
pbc unwrap -all
pbc join connected -all
pbc wrap -center com -centersel "segname DNA1" -compound residue -all
###Code
# load vmd
print(f'vmd {top_psf} {save_lammpstrj}')
!vmd {top_psf} {save_lammpstrj}
###Output
vmd ../11bps/CCTATATATCC/in00_cvmd_base-4_strand-1.psf temp_lammpstrjs/CCTATATATCC_b4_10x5.lammpstrj
/software/vmd-1.9.3-el8-x86_64/lib/vmd/vmd_LINUXAMD64: /lib64/libGL.so.1: no version information available (required by /software/vmd-1.9.3-el8-x86_64/lib/vmd/vmd_LINUXAMD64)
Info) VMD for LINUXAMD64, version 1.9.3 (November 30, 2016)
Info) http://www.ks.uiuc.edu/Research/vmd/
Info) Email questions and bug reports to [email protected]
Info) Please include this reference in published work using VMD:
Info) Humphrey, W., Dalke, A. and Schulten, K., `VMD - Visual
Info) Molecular Dynamics', J. Molec. Graphics 1996, 14.1, 33-38.
Info) -------------------------------------------------------------
Info) Multithreading available, 96 CPUs detected.
Info) CPU features: SSE2 AVX AVX2 FMA KNL:AVX-512F+CD+ER+PF
Info) Free system memory: 67GB (35%)
Info) No CUDA accelerator devices available.
Warning) Detected X11 'Composite' extension: if incorrect display occurs
Warning) try disabling this X server option. Most OpenGL drivers
Warning) disable stereoscopic display when 'Composite' is enabled.
Info) OpenGL renderer: llvmpipe (LLVM 9.0.0, 256 bits)
Info) Features: STENCIL MDE CVA MTX NPOT PP PS GLSL(OVF)
Info) Full GLSL rendering mode is available.
Info) Textures: 2-D (8192x8192), 3-D (512x512x512), Multitexture (8)
Info) Dynamically loaded 2 plugins in directory:
Info) /software/vmd-1.9.3-el8-x86_64/lib/vmd/plugins/LINUXAMD64/molfile
Info) File loading in progress, please wait.
Info) Using plugin psf for structure file ../11bps/CCTATATATCC/in00_cvmd_base-4_strand-1.psf
psfplugin) WARNING: no impropers defined in PSF file.
psfplugin) no cross-terms defined in PSF file.
Info) Analyzing structure ...
Info) Atoms: 63
Info) Bonds: 61
Info) Angles: 94 Dihedrals: 36 Impropers: 0 Cross-terms: 0
Info) Bondtypes: 0 Angletypes: 0 Dihedraltypes: 0 Impropertypes: 0
Info) Residues: 22
Info) Waters: 0
Info) Segments: 2
Info) Fragments: 2 Protein: 0 Nucleic: 0
lammpsplugin) New style dump with 5 data fields. Coordinate data flags: 0x08
lammpsplugin) Using absolute atomic coordinates directly.
Info) Using plugin lammpstrj for coordinates from file temp_lammpstrjs/CCTATATATCC_b4_10x5.lammpstrj
Info) Finished with coordinate file temp_lammpstrjs/CCTATATATCC_b4_10x5.lammpstrj.
vmd > |
Time Series Analysis Labs/Castaneda_Juan_Lab1.ipynb | ###Markdown
A Discrete Convolution Program (5 pts) Write a discrete convolution function `myConv` that convolves two arrays {$f_i, i = 0, \dots , N_f-1$} and {$w_j, j = 0, \dots , N_w-1$} to obtain an output time series {$g_n$}. For simplicity, assume a fixed sampling interval $\Delta = 1$, and further, that $f$ and $w$ are $0$ outside of their sampled regions.
1. How long is {$g_n$}? In other words, how many non-zero points can it have? Justify your answer.
2. Please copy and paste your function `g = myConv(f, w)` to the PDF report.
3. Provide a test to convince yourself (and me) that your function agrees with `numpy.convolve`. For example, generate two random timeseries $f$, $w$ with $N_f=50$, $N_w=100$, drawing each element from $U[0,1]$, and plot the difference between your function's output and numpy's. Include the code for your test in the PDF report.
4. Compare the speed of your `myConv` function to the NumPy function. Provide a plot of the comparison, and include your python code in the PDF report. Is your function faster or slower than the NumPy function? Can you suggest why that is the case?
_Hint_: For the speed test part, make up your own $f_i$ and $w_j$ time series, and for simplicity, study the cases of $N_f = N_w = 10, 100, 1000, 10000$. To accurately time each computation of the convolution function, import the time module and place calls to `time.time` around your code:
```
import time
t1 = time.time()
g = myConv(f, w)
t2 = time.time()
print(t2-t1)
```
Alternatively, use the `timeit` module:
```
import timeit
print(timeit.timeit('g = myConv(f, w)', number=10000))
```
1. The number of non-zero points $g_n$ can have is $N_f + N_w - 1$. This is because we are essentially "sliding" one array (after flipping it) over the other one to do a convolution (the convolution will have a value of zero everywhere else). The first time these arrays overlap is when the last element of the flipped array (the one we "slide") overlaps with the first element of the other array. The last time they overlap is when the first element of the flipped array overlaps with the last element of the other array.
Let's call F the flipped array we slide over W. From the first time the arrays overlap until the last element of F no longer overlaps with any element in W, we perform $N_w$ "slides". Then all that is left is the number of slides until the first element in F overlaps with the last element of W. This number of slides is $N_f-1$ (since the last element of F is already "past" W). Thus the total number of non-zero values in the convolution $g_n$ is $N_w + N_f - 1$. Note that a "proof" of this can be found in the course lecture slides.
2. Note: I did this question on my own prior to the lecture on discrete convolution, so I implemented the convolution type 'same' (i.e., the result is the same length as the longer of the two arrays). After the lecture I realized that the convolution we are asked for is 'full' convolution (of the length given in part 1 above), so I implemented that as well. The 'full' convolution is the function called myConv, and the 'same' convolution is called myConv1.
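As a quick concrete check of the length claim in part 1: with $N_f=3$ and $N_w=5$ the full convolution has $3+5-1=7$ nonzero samples, which `len(np.convolve(np.ones(3), np.ones(5)))` confirms.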
###Code
def myConv1(W, F, delta=1):
'''
Returns W∗F, the convolution of array W with array F such that the result is the same size as F
ie. 'same' convolution
Assumes F is the larger array. If W is larger, swap the arrays
Uses sampling interval given by delta.
'''
# make sure W is the smaller array
if len(W) > len(F):
W, F = F, W
# If smaller array is even in length, 'prepend' a zero to it (seems to be what np.convolve does)
if len(W)%2==0:
W = np.insert(W, 0, 0)
# Zero-pad F based on the size of W
k = np.shape(W)[0]
padWidth = int((k-1)/2)
F = np.pad(F, (padWidth, padWidth), 'constant')
# initialize matrix for the result of convolution
result = np.zeros(np.shape(F))
# Flip the array so that you can just apply correlation (convolution is correlation but with the "filter" flipped in all dimensions)
W = W[::-1]
# Perform correlation
for i in range(padWidth, F.shape[0]-padWidth):
subArray = F[i-padWidth:i+padWidth+1]
result[i] = np.dot(W, subArray)
# scale the result by the sampling rate:
result = delta*result
# Remove the padding?
result = result[padWidth:result.shape[0]-padWidth]
return result
def myConv(W, F, delta=1):
'''
Returns W∗F, the convolution of array W with array F such that the result is of length len(F) + len(W) - 1
ie. 'full' convolution
Uses sampling interval given by delta.
'''
evenW = len(W)%2==0
# If smaller array is even in length, 'prepend' a zero to it (seems to be what np.convolve does)
if evenW:
W = np.insert(W, 0, 0)
# initialize matrix for the result of convolution
result = np.zeros(len(F) + len(W) - 1)
# Zero-pad F based on the size of W
k = np.shape(W)[0]
padWidth = int((k-1))
F = np.pad(F, (padWidth, padWidth), 'constant')
# Flip the array so that you can just apply correlation (convolution is correlation but with the "filter" flipped in all dimensions)
W = W[::-1]
# Perform correlation
for i in range(padWidth//2, len(F)-len(W)//2):
subArray = F[i-padWidth//2:i+padWidth//2+1]
result[i-len(W)//2] = np.dot(W, subArray)
# scale the result by the sampling rate:
result = delta*result
# removing the extra zero that results from prepending a zero to W
if evenW:
result = result[1:]
return result
# Quick test to check that both my convolution functions work
a = np.array([1, 0, 0, 0])
b = np.array([1, 2, 3, 4, 5, 6])
print(np.convolve(a, b, 'same'))
print(myConv1(a, b, 1))
print(np.convolve(a, b))
print(myConv(a, b, 1))
###Output
[2 3 4 5 6 0]
[2. 3. 4. 5. 6. 0.]
[1 2 3 4 5 6 0 0 0]
[1. 2. 3. 4. 5. 6. 0. 0. 0.]
###Markdown
3.
###Code
F = np.random.rand(50)
W = np.random.rand(100)
print("Notice that the difference between numpy.convolve and myConv is very small: ")
print(np.linalg.norm(np.convolve(F, W) - myConv(F, W)))
plt.plot(np.convolve(F, W))
plt.title("numpy convolution function")
plt.xlabel("n")
plt.ylabel("G1_n")
plt.show()
plt.plot(myConv(F, W))
plt.title("My convolution function")
plt.xlabel("n")
plt.ylabel("G2_n")
plt.show()
plt.plot(np.convolve(F, W) - myConv(F, W))
plt.title("Difference between the two \n convolution functions")
plt.xlabel("n")
plt.ylabel("G1_n - G2_n")
plt.show()
###Output
Notice that the difference between numpy.convolve and myConv is very small:
5.1598503419391096e-15
###Markdown
4.
###Code
import time
numpyConvTimes=[]
myConvTimes=[]
N = [10, 100, 1000, 10000]
for n in N:
F = np.random.rand(n)
W = np.random.rand(n)
t1 = time.time()
g = np.convolve(F, W, 'same')
t2 = time.time()
numpyConvTimes.append(t2-t1)
t1 = time.time()
g = myConv(F, W)
t2 = time.time()
myConvTimes.append(t2-t1)
plt.plot(N, numpyConvTimes, 'r.', label="np.convolve")
plt.plot(N, myConvTimes, 'b+', label="myConv")
plt.xscale('log')
plt.title("Speed comparison between the \n two convolution functions")
plt.xlabel("N (length of the two arrays being convolved)")
plt.ylabel("Time (s)")
plt.legend(loc="upper left")
plt.show()
###Output
_____no_output_____
###Markdown
My function is slower than the numpy function. One possible reason is that the numpy function might be completely vectorized and has no for loop(s). Simple Physical System: RL Circuit Response (8 pts) Consider a simple physical system consisting of a resistor (with resistance `R`) and an inductor (withinductance `L`) in series. We apply an input voltage $a(t)$ across the pair in series, and measure the outputvoltage $b(t)$ across the inductor alone. For this linear system, 1. Show analytically that its step response (i.e., the $b(t)$ we obtain when the input voltage $a(t) = H(t)$, the Heaviside function) is given by $$ S(t) = e^{-Rt/L} H(t), $$ and its impulse response (i.e., the output voltage $b(t)$ when $a(t) = \delta(t)$) is given by $$ R(t) = \delta(t) - \frac{R}{L} e^{-Rt/L} H(t). $$ _Hint_: Construct and solve the ODE relating the voltages under consideration. Consider the two $b(t)$ choices to derive $S(t)$ and $R(t)$. Formulas $\frac{d}{dt} H(t) = \delta(t)$ and $\delta(t) f(t) = \delta(t) f(0)$ may help. 2. Discretize the impulse response $R(t)$ function, realizing that $H(t)$ should be discretized as $$H = [0.5, 1, 1, \dots],$$ and $\delta(t)$ should be discretized as $$D = [1/dt, 0, 0, \dots].$$ Take advantage of your `myConv` function, or the NumPy built-in function `convolve`, and write your own Python function `V_out = RLresponse(R,L,V_in,dt)` to take an input series $V_{in}$ sampled at $\Delta = dt$, and calculate the output series $V_{out}$ sampled by the same $dt$. Please paste your Python function here. (Hint: here $\Delta$ may not be 1, so remember to build the multiplication of $\Delta$ into your convolution function.) 3. Using $R = 850 \Omega$, $L = 2 H$, and sampling period $dt = 0.20 \text{ ms}$, test your RL-response function with {$H_n$} series (discretized $H(t)$) as input, and plot the output time series (as circles) on top of the theoretical curve $S(t)$ given by part 1 (as a solid line). Repeat this for {$D_n$} (discretized $\delta(t)$) and $R(t)$. Make the time range of the plots 0 to at least 20 ms. Please list your Python code here. Note: See end of PDF for question 2.1 2.2 & 2.3:
###Code
#R is the weighting function of the system because it is the output we get from sending in a pulse as the input.
#Therefore, we can now use R (discretized) as the weighting function and do a covolution with V_in to get V_out
def RLresponse(r, L, V_in, dt):
n = len(V_in)
    t = np.arange(n) * dt  # time axis matching V_in (0-20 milliseconds here)
# create step and delta functions of the same length as V_in
H = np.ones(n)
H[0] = 0.5
D = np.zeros(n)
D[0] = 1.0/dt
# Weight function (discretized R)
R = D - (r/L)*np.exp((-1*r*t)/L)*H
# convolve V_in and the weight function
return np.convolve(R, V_in, mode='same')*dt
# Constants
r = 850.0
L = 2.0
dt = 0.0002 # 0.2 ms = 2 x 10^-4 s
n = 100 # want 0-20 ms, spaced 0.2 ms apart, so need 100 points
# input function is step function
v_in1 = np.ones(n)
v_in1[0] = 0.5
# second input function is delta function
v_in2 = np.zeros(n)
v_in2[0] = 1.0/dt
# time
t = np.arange(0, 0.02, dt)
plt.plot(t, v_in1)
plt.title("Step Function")
plt.ylabel("V")
plt.xlabel("Time (s)")
plt.show()
plt.plot(t, v_in2)
plt.title("Delta Function")
plt.ylabel("V")
plt.xlabel("Time (s)")
plt.show()
# Theoretical step response
H = v_in1[:]
D = v_in2[:]
S = np.exp(-1.0*r*t/L)*H
# Step response with my function
V_out1 = RLresponse(r, L, v_in1, dt)
# plotting
plt.plot(t, S, label='Theoretical')
plt.plot(t, V_out1, 'r.', label='calculated using RLresponse()')
plt.title("Theoretical and calculated step response S(t)")
plt.xlabel("Time (s)")
plt.ylabel("V")
plt.legend(loc="upper right")
plt.show()
plt.plot(t, S, label='Theoretical')
plt.plot(t-0.01, -V_out1, label='RLresponse() flipped and shifted')
plt.xlabel("Time (s)")
plt.ylabel("V")
plt.title("Theoretical and calculated step response S(t) \n after flipping and shifting the calculated curve")
plt.legend(loc="upper right")
plt.show()
# Theoretical impulse response
R = D - (r/L)*np.exp((-1*r*t)/L)*H
# Impulse response with my function
V_out2 = RLresponse(r, L, v_in2, dt)
# plotting
plt.plot(t, R, label='Theoretical')
plt.plot(t, V_out2, 'r.', label='calculated using RLresponse()')
plt.title("Theoretical and calculated impulse response R(t)")
plt.xlabel("Time (s)")
plt.ylabel("V")
plt.legend(loc="upper right")
plt.show()
plt.plot(t, V_out2, 'r.')
plt.title("Calculated impulse response R(t)")
plt.xlabel("Time (s)")
plt.ylabel("V")
plt.show()
###Output
_____no_output_____
###Markdown
Note
Obviously I didn't get this part correct. If you could provide any input as to where I went wrong I would really appreciate it.
For step response: It seems strange to me that the output of my function overlaps so well with the theoretical curve after I flipped and shifted it. I used mode='same' for np.convolve so that the output of RLresponse would be the same length as the theoretical curve (as in question 3), but this removes a large part of the graph (below I included what it looks like if I use mode='full').
For impulse response: When I used 'full' convolution, the impulse response was the same as the theoretical one (see graph below)
###Code
#R is the weighting function of the system because it is the output we get from sending in a pulse as the input.
#Therefore, we can now use R (discretized) as the weighting function and do a covolution with V_in to get V_out
def RLresponse2(r, L, V_in, dt):
n = len(V_in)
    t = np.arange(n) * dt  # time axis matching V_in (0-20 milliseconds here)
# create step and delta functions of the same length as V_in
H = np.ones(n)
H[0] = 0.5
D = np.zeros(n)
D[0] = 1.0/dt
# Weight function (discretized R)
R = D - (r/L)*np.exp((-1*r*t)/L)*H
# convolve V_in and the weight function
return np.convolve(R, V_in, mode='full')*dt
# Step response with my function
V_out1 = RLresponse2(r, L, v_in1, dt)
# plotting
plt.plot(V_out1)
plt.title("Calculated step response S(t) using 'full' convolution")
plt.show()
# Impulse response with my function
V_out2 = RLresponse2(r, L, v_in2, dt)
# plotting
plt.plot(V_out2)
plt.title("Calculated impulse response R(t) using 'full' convolution"
"\n Note that this is identical to the theoretical curve")
plt.show()
###Output
_____no_output_____
###Markdown
Convolution of Synthetic Seismograms (5 pts) Numerical simulations of seismic wave propagation can now be routinely done for [global and regional earthquakes](http://global.shakemovie.princeton.edu/). For a recent southern Pakistan earthquake (Jan 18, 2011, 20:23:33 UTC), the raw vertical synthetic seismogram (i.e., displacement field simulated at a seismic station) for station `RAYN` (Ar Rayn, Saudi Arabia) is provided (`RAYN.II.LHZ.sem`). A common practice in seismology is to convolve synthetic seismograms with a Gaussian function
$$ g(t) = \frac{1}{\sqrt{\pi}t_H} e^{-(t/t_H)^2} $$
to reflect either the time duration of the event or the accuracy of the numerical simulation.
1. Provide two plots. Plot 1: the raw synthetic seismogram for station `RAYN` between 0 and 800 seconds. Plot 2: Gaussian functions with half duration $t_H = 10 \text{ sec}$ and $t_H = 20 \text{ sec}$ (include a legend). For the gaussians, use the same timestep $dt$ as the seismogram data.
2. Use numpy's convolve function to convolve the raw timeseries with a Gaussian function (both $t_H = 10$ and $t_H = 20$ cases). Plot the raw data and the two convolved time series between 0 and 800 seconds on the same graph (include a legend) and comment on your results.
__Hints__
* The raw synthetics file `RAYN.II.LHZ.sem` is given as a text file with two columns: time in seconds and displacement in meters.
* Gaussian functions quickly decay to zero beyond $[-3t_H, 3t_H]$, therefore it is sufficient to sample $g(t)$ within this interval.
* Use `mode='same'` when calling numpy convolve to truncate the convolution to the max of the supplied arrays (i.e. length of the raw timeseries in our case). This is convenient, since we want to compare the convolution output to the original timeseries. Alternatively, use the default mode (`'full'`) and truncate the output manually.
* As a check for part 2, ensure that your convolved timeseries is aligned with (or "overlaps") the raw data timeseries.
###Code
data = np.loadtxt("RAYN.II.LHZ.sem").T
data.shape
plt.plot(data[0], data[1])
plt.xlim([0,800])
plt.title("Station RAYN Raw Synthetic Seismogram")
plt.xlabel("Time (s)")
plt.ylabel("Displacement (m)")
plt.show()
def Gaussian(tH, delta):
t = np.arange(-3*tH, 3*tH, delta)
return t, (np.exp(-1.0*((t/tH)**2)))/(tH*np.sqrt(np.pi))
# get delta from seismogram data
delta = data[0,1]-data[0,0]
# plot gaussians for tH=10 and tH=20
t1, g1 = Gaussian(10, delta)
t2, g2 = Gaussian(20, delta)
plt.plot(t1, g1, label="tH = 10 seconds")
plt.plot(t2, g2, label="tH = 20 seconds")
plt.title("Gaussian with different half durations ")
plt.xlabel("Time (s)")
plt.legend(loc="upper left")
plt.show()
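# Scaling the discrete convolution by delta (below) approximates the continuous
# convolution integral: each term of the sum stands in for a strip of width dt.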
convolution1 = np.convolve(data[1], g1, 'same')*delta
convolution2 = np.convolve(data[1], g2, 'same')*delta
plt.plot(data[0], data[1], 'y',label='raw signal')
plt.plot(data[0], convolution1, 'b',label='signal*gaussian (tH=10 s)')
plt.plot(data[0], convolution2, 'r', label='signal*gaussian (tH=20 s)')
plt.xlim([0,800])
plt.legend(loc="upper left")
plt.title("Station RAYN Seismogram Convolved With Gaussians")
plt.xlabel("Time (s)")
plt.ylabel("Displacement (m)")
plt.show()
###Output
_____no_output_____ |
.ipynb_checkpoints/Dynamics_lab03_diffusion1D-checkpoint.ipynb | ###Markdown
AG Dynamics of the Earth
Jupyter notebooks
Georg Kaufmann
Dynamic systems: 3. Continuity
Diffusion equation in 1D
----
*Georg Kaufmann, Geophysics Section, Institute of Geological Sciences, Freie Universität Berlin, Germany*
----
In this notebook, we solve a simple **transient diffusion equation** in one dimension, using different numerical methods.
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
---- Theoretical model
We start by defining the transient diffusion equation. We describe a quantity $c(x,t)$ in an incompressible fluid, which changes its position only through diffusion of the particle quantity:
$$\fbox{$\frac{\partial c(x,t)}{\partial t} - D \frac{\partial^2 c(x,t)}{\partial x^2} = 0$}$$
with $x$ [m] the position, $t$ [s] time, $D$ [m$^2$/s] the diffusivity, and $\Delta t$ [s] the time increment.
The transient diffusion equation is a **parabolic partial differential equation**, which has a unique solution for the **initial condition**:
$$c(x,t=0) = c_0(x,0)$$
---- Gaussian function
We define the `Gaussian` function with mean $\mu$ and standard deviation $\sigma$:
$$c(x,t) = e^{-\frac{(x-vt-\mu)^2}{\sigma^2}}$$
which we use as the initial condition for the transient diffusion equation.
###Code
def gaussian(x,v=0.,t=0.,mu=5.,sigma=1.):
f = np.exp(-(x-v*t-mu)**2/(sigma)**2)
return f
xstep = 101
x = np.linspace(0,10,xstep)
u0 = gaussian(x)
plt.figure(figsize=(10,6))
plt.xlabel('x')
plt.ylabel('c')
plt.grid(alpha=0.3)
plt.plot(x,u0,label='t=0s')
plt.legend()
###Output
_____no_output_____
###Markdown
---- Numerical solution methods
Next, we derive several numerical methods which can be used to solve the transient diffusion equation. We first need to assign the temporal and spatial step sizes, $\Delta t$ [s] and $\Delta x$ [m], the velocity $v$ [m/s], and the diffusivity $D$ [m$^2$/s]:
###Code
dt = 0.05
dx = (x.max()-x.min()) / (xstep-1)
v = 0.0
D = 0.1
###Output
_____no_output_____
###Markdown
Thus we have discretised both spatial and temporal variables:
$$\begin{array}{rcl}t_n &=& t_0 + n \Delta t, \quad n=0,N \\x_i &=& x_0 + i \Delta x, \quad i=0,I\end{array}$$
The `Courant number` (here a diffusion number), defined as
$$Co = \frac{D \Delta t}{\Delta x^2}$$
controls the stability of the numerical solutions. The explicit FTCS solution is stable for $Co \le 1/2$.
###Code
Courant = D*dt/dx/dx
print ('Courant number: ',np.round(Courant,4))
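# Note: 0.5 sits exactly at the FTCS stability limit D*dt/dx**2 <= 1/2.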
###Output
Courant number: 0.5
###Markdown
---- FTCS explicit scheme (forward time centered space)
Our first scheme is called `FTCS`, forward time, centered space: the derivatives in the transient diffusion equation are replaced by **forward differences in time** and **central differences in space**:
$$\frac{c_{i}^{n+1}-c_{i}^{n}}{\Delta t}=D \frac{c_{i+1}^{n}-2 c_{i}^{n} + c_{i-1}^{n}}{\Delta x^2}$$
or, solved for the next time step,
$$c_{i}^{n+1}=\left( 1-2\alpha \right) c_{i}^{n} + \alpha \left( c_{i+1}^{n} + c_{i-1}^{n} \right)$$
with
$$\alpha = D \frac{\Delta t}{\Delta x^2}$$
We implement the FTCS explicit method:
###Code
# start time
t = 0
# initial values
u = gaussian(x)
alpha = D*dt/dx/dx
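# A vectorized equivalent of the inner update loop below (same FTCS stencil):
#   u[1:-1] = (1 - 2*alpha)*uold[1:-1] + alpha*(uold[2:] + uold[:-2])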
# solution
for n in range(200):
t = t + dt
    uold = u.copy()  # copy, not alias: the update below must read unmodified old values
for i in range(1,u.shape[0]-1):
u[i] = (1-2*alpha)*uold[i] + alpha*(uold[i+1]+uold[i-1])
if (np.abs(t-4) < dt/2):
u50 = np.copy(u)
elif (np.abs(t-8) < dt/2):
u100 = np.copy(u)
elif (np.abs(t-10) < dt/2):
u150 = np.copy(u)
print(x[np.where(u0 == np.amax(u0))],u0[np.where(u0 == np.amax(u0))])
print(x[np.where(u50 == np.amax(u50))],u50[np.where(u50 == np.amax(u50))])
print(x[np.where(u100 == np.amax(u100))],u100[np.where(u100 == np.amax(u100))])
print(x[np.where(u150 == np.amax(u150))],u150[np.where(u150 == np.amax(u150))])
plt.figure(figsize=(10,6))
plt.xlabel('x')
plt.ylabel('c')
plt.grid(alpha=0.3)
plt.plot(x,u0,label='t=0s')
plt.plot(x,u50,label='t=4s')
plt.plot(x,u100,label='t=8s')
plt.plot(x,u150,label='t=10s')
plt.title('FTCS method')
plt.legend()
###Output
_____no_output_____
###Markdown
---- DuFort-Frankel scheme
Our second scheme is the `DuFort-Frankel scheme`, which reads:
$$\frac{c_{i}^{n+1}-c_{i}^{n-1}}{2 \Delta t}=D \frac{c_{i+1}^{n}-2 \frac{c_{i}^{n+1} + c_{i}^{n-1}}{2} + c_{i-1}^{n}}{\Delta x^2}$$
or, solved for the next time step,
$$c_{i}^{n+1}=\frac{1-\alpha'}{1+\alpha'} c_{i}^{n-1} + \frac{\alpha'}{1+\alpha'} \left( c_{i+1}^{n}+c_{i-1}^{n} \right)$$
with
$$\alpha' = 2 D \frac{\Delta t}{\Delta x^2}$$
Note that we need **two older time steps**, $c_{i}^{n}$ and $c_{i}^{n-1}$!
We therefore use the FTCS explicit method to generate the first time step...
We implement the DuFort-Frankel method:
###Code
# start time
t = 0
# initial values
u = gaussian(x)
# first step with FTCS explicit
uold = u.copy()  # copy, not alias: u is updated in place below
alpha = D*dt/dx/dx
t = t + dt
for i in range(1,u.shape[0]-1):
u[i] = (1-2*alpha)*uold[i] + alpha * (uold[i+1]+uold[i-1])
# sub-sequent steps with DuFord-Frankel
alphap = 2*D*dt/dx/dx  # alpha' = 2*D*dt/dx**2, matching the derivation above
for n in range(1,200):
t = t + dt
uveryold = uold
    uold = u.copy()  # copy, not alias: u is updated in place below
for i in range(1,u.shape[0]-1):
u[i] = (1-alphap)/(1+alphap)*uveryold[i] + alphap/(1+alphap) * (uold[i+1]+uold[i-1])
if (np.abs(t-4) < dt/2):
u50 = np.copy(u)
elif (np.abs(t-8) < dt/2):
u100 = np.copy(u)
elif (np.abs(t-10) < dt/2):
u150 = np.copy(u)
plt.figure(figsize=(10,6))
plt.xlabel('x')
plt.ylabel('c')
plt.grid(alpha=0.3)
plt.plot(x,u0,label='t=0s')
plt.plot(x,u50,label='t=4s')
plt.plot(x,u100,label='t=8s')
plt.plot(x,u150,label='t=10s')
plt.title('DuFort-Frankel method')
plt.legend()
###Output
_____no_output_____
###Markdown
---- Crank-Nicolson method
Our third scheme is called the `Crank-Nicolson method`, which is an **implicit** method:
$$\frac{c_{i}^{n+1}-c_{i}^{n}}{\Delta t}=D \frac{\left( c_{i+1}^{n+1} - 2 c_{i}^{n+1} + c_{i-1}^{n+1} \right) +\left( c_{i+1}^{n} - 2 c_{i}^{n} + c_{i-1}^{n} \right)}{2 \Delta x^2}$$
With $\alpha=\frac{D \Delta t}{\Delta x^2}$, we can recast the equation into a system of linear equations:
$$-\alpha c_{i+1}^{n+1} + 2 (1+\alpha) c_{i}^{n+1} -\alpha c_{i-1}^{n+1}=\alpha c_{i+1}^{n} + 2 (1-\alpha) c_{i}^{n} +\alpha c_{i-1}^{n}$$
The right-hand side is known, thus we have a tridiagonal linear system:
$$\mathbf{A} \mathbf{c} = \mathbf{b}$$
We implement the Crank-Nicolson method:
###Code
# start time
t = 0
alpha = D*dt/dx/dx
# initial values
u = gaussian(x)
# initialise matrix A and rhs vector b
A = np.zeros(len(u)*len(u)).reshape(len(u),len(u))
b = np.zeros(len(u))
print(A.shape)
print(b.shape)
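# Note: A is tridiagonal, so a banded solver such as scipy.linalg.solve_banded
# would be much cheaper than the dense np.linalg.solve used below; the dense
# solve is kept for clarity on this small grid.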
for n in range(200):
t = t + dt
uold = u
for i in range(len(u)):
# fill matrix
if (i != 0):
A[i,i-1] = - alpha
A[i,i] = + 2*(1+alpha)
if (i != len(u)-1):
A[i,i+1] = - alpha
# fill rhs vector
b[i] = 2*(1-alpha)*uold[i]
if (i != 0):
b[i] = b[i] + alpha*uold[i-1]
if (i != len(u)-1):
b[i] = b[i] + alpha*uold[i+1]
# solve linear sytem
u = np.linalg.solve(A,b)
if (np.abs(t-4) < dt/2):
u50 = np.copy(u)
elif (np.abs(t-8) < dt/2):
u100 = np.copy(u)
elif (np.abs(t-10) < dt/2):
u150 = np.copy(u)
plt.figure(figsize=(10,6))
plt.xlabel('x')
plt.ylabel('c')
plt.grid(alpha=0.3)
plt.plot(x,u0,label='t=0s')
plt.plot(x,u50,label='t=4s')
plt.plot(x,u100,label='t=8s')
plt.plot(x,u150,label='t=10s')
plt.title('Crank-Nicolson method')
plt.legend()
###Output
_____no_output_____ |
Code/6.-Cómo descompongo una matriz no cuadrada (SVD).ipynb | ###Markdown
Singular value decomposition of a matrix
Eigendecomposition is only applicable to square matrices. The present method lets us decompose any matrix into three matrices:
U → left singular vectors
D → diagonal matrix of singular values
V → right singular vectors
These values are obtained in Python via:
U, D, V = np.linalg.svd(matriz)
Note: we can view a rectangular matrix as a sub-transformation of space; that is, we can condense information from three down to two dimensions.
###Code
# Import the library
import numpy as np
A = np.array([[1,2,3],[3,4,5]])
print(A)
U, D, V = np.linalg.svd(A)
print(U)
print(D)
print(np.diag(D))
print(V)
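# Sanity check (a sketch): with the default full_matrices=True, U is 2x2,
# D holds the two singular values, and the rows of V are the right
# singular vectors, so A can be rebuilt as U @ diag(D) @ V[:2, :].
assert np.allclose(A, U @ np.diag(D) @ V[:2, :])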
###Output
[[-0.39133557 -0.5605708 -0.72980603]
[-0.8247362 -0.13817999 0.54837623]
[ 0.40824829 -0.81649658 0.40824829]]
|
basics/example_from_presentation.ipynb | ###Markdown
Example from presentation
###Code
real_test = np.matrix(((1, 1.305, 1.155, 1.314, 146.113, 8.624, 1.744, 1.82),
(0.766, 1, 0.885, 1.007, 111.93, 6.606, 1.336, 1.394),
(0.866, 1.13, 1, 1.137, 126.48, 7.465, 1.51, 1.575),
(0.761, 0.993, 0.879, 1, 111.202, 6.563, 1.327, 1.385),
(0.007, 0.009, 0.008, 0.009, 1, 0.059, 0.012, 0.012),
(0.116, 0.151, 0.134, 0.152, 16.943, 1, 0.202, 0.211),
(0.573, 0.749, 0.662, 0.753, 83.78, 4.945, 1, 1.043),
(0.55, 0.717, 0.635, 0.722, 80.304, 4.74, 0.959, 1)))
# Pretty matrix print
with np.printoptions(precision=3, suppress=True):
print(real_test)
# Load with standard name
A = np.matrix(((1, 1.305, 1.155, 1.314, 146.113, 8.624, 1.744, 1.82),
(0.766, 1, 0.885, 1.007, 111.93, 6.606, 1.336, 1.394),
(0.866, 1.13, 1, 1.137, 126.48, 7.465, 1.51, 1.575),
(0.761, 0.993, 0.879, 1, 111.202, 6.563, 1.327, 1.385),
(0.007, 0.009, 0.008, 0.009, 1, 0.059, 0.012, 0.012),
(0.116, 0.151, 0.134, 0.152, 16.943, 1, 0.202, 0.211),
(0.573, 0.749, 0.662, 0.753, 83.78, 4.945, 1, 1.043),
(0.55, 0.717, 0.635, 0.722, 80.304, 4.74, 0.959, 1)))
# # Pretty print A matrix
# with np.printoptions(precision=3, suppress=True):
# print(A)
# The rate matrix A is balanced (consistent) if A^2 - n*A = 0,
# where A^2 is the true matrix product (not the element-wise square)
M = A @ A - A.shape[0] * A
# # Pretty print M matrix
# with np.printoptions(precision=1, suppress=True):
# print(M)
if np.array_equal(M, np.zeros(shape = M.shape)):
print(f"The given matrix is a balanced matrix.")
elif not np.array_equal(M, np.zeros(shape = M.shape)):
print(f"The given matrix is unbalanced.")
else:
print(f"Unknown error encountered.")
# Calculate Luis matrix
luis = np.multiply(A, np.transpose(A))
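# Each entry luis[i, j] = A[i, j] * A[j, i] is the round-trip product of
# converting i -> j -> i; a value above 1 flags a profitable two-currency loop.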
if np.array_equal(luis, np.ones(shape=luis.shape)):  # every round trip multiplies to exactly 1
    print(f"There are no opportunities for arbitrage in this matrix.")
elif not np.array_equal(luis, np.ones(shape=luis.shape)):
print(f"Arbitrage is a possibility in this matrix.")
# # Pretty print luis matrix
# with np.printoptions(precision=3, suppress=True):
# print(luis)
###Output
The given matrix is unbalanced.
Arbitrage is a possibility in this matrix.
###Markdown
Find the most profitable transactions to conduct
###Code
# Ajusting code for this specific example
# References
presentation_tuple = tuple(['GBP','USD','EUR','CHF','JPY','DKK','CAD','AUD'])
presentation_currencies = {'GBP' :1,'USD' :2,'EUR' :3,'CHF' :4,'JPY' :5,'DKK' :6,'CAD' :7, 'AUD':8}
###########################################################################
# Find the most profitable transaction paths in the "Luis" matrix
# and then saves the pairs of most profitable transaction paths
# in a list of tuples
###########################################################################
# Find the top three top max values by copying whole luis matrix
# and sequentially erasing the top value to find the next top value
luis_ = copy.deepcopy(luis)
max_values = np.empty(shape=(3,1))
max_locations = []
for i in range(3):
# Find max and its index on matrix
max_found = np.max(luis_)
locations = np.where(luis_ == max_found)
# Print maximums found
print(f"Max number {i} is {max_found}")
    # Set max locations to zero to find the next maximum
    # (index with the (rows, cols) tuple so only those elements are zeroed)
    luis_[locations] = 0
    # Collect the maximum value & its locations
max_values[i] = max_found
max_locations.append(tuple(x for x in locations))
# Print most profitable transaction paths & collect names currencies.
max_currency_names = []
for i in max_values:
result = np.where(luis == i)
# print(result)
print(f"Most profitable path is back and forth between "
f"{presentation_tuple[result[0][0]]}"
f" and {presentation_tuple[result[0][1]]}.")
tmp_tuple = tuple([presentation_tuple[result[0][0]],
presentation_tuple[result[0][1]]])
max_currency_names.append(tmp_tuple)
print(f"Saved names of currencies are: {max_currency_names}")
###########################################################################
###Output
Max number 0 is 1.022791
Max number 1 is 1.01184
Max number 2 is 1.0073699999999999
Most profitable path is back and forth between GBP and JPY.
Most profitable path is back and forth between EUR and JPY.
Most profitable path is back and forth between USD and JPY.
Saved names of currencies are: [('GBP', 'JPY'), ('EUR', 'JPY'), ('USD', 'JPY')]
###Markdown
Simulate a currency exchange with randomly chosen path
###Code
#####################################################################################
# Simulate currency exchange with random path
#####################################################################################
# Parameters
investment = 100
t = 10
fees = 1
# Money after the exchange using the randomly chosen path (IMPORTANT NOTE: IT HAS TO BE MINUS 1
# BECAUSE WE ARE DEALING WITH INDICES)
one_ = presentation_currencies.get('DKK') - 1
two_ = presentation_currencies.get('JPY') - 1
three_ = presentation_currencies.get('AUD') - 1
# Exchange rates
one_to_two = A[one_, two_]
two_to_one = A[two_, one_]
two_to_three = A[two_, three_]
three_to_two = A[three_, two_]
# Calculate money after trade and the profit obtained
#        [initial conversion]  [bounce between JPY and AUD]   [convert back]
result = investment * one_to_two * (two_to_three*three_to_two)**t * two_to_one * fees**t
profit = result - investment
print("Summary:\n"
f"Initial investment of: ${investment}\n"
f"Total number of iterations: {t}\n"
f"Percentage of total money lost to fees: {np.round((1-fees)*100)}%\n"
f"Money after trade was concluded: ${result}\n"
f"Lost to fees: ${investment*(1-fees)}\n"
f"Final profit: ${profit}")
#####################################################################################
###Output
Summary:
Initial investment of: $100
Total number of iterations: 10
Percentage of total money lost to fees: 0%
Money after trade was concluded: $69.02820279674255
Lost to fees: $0
Final profit: $-30.971797203257452
###Markdown
Simulate a currency exchange with "Luis" path
###Code
#####################################################################################
# Simulate currency exchange with "Luis" path
#####################################################################################
# Parameters
investment = 100
t = 10
fees = 1
# Money after the exchange using luis path (IMPORTANT NOTE: IT HAS TO BE MINUS 1
# BECAUSE WE ARE DEALING WITH INDICES)
one_ = presentation_currencies.get('USD') - 1
two_ = presentation_currencies.get('GBP') - 1 # These two are the
three_ = presentation_currencies.get('JPY') - 1 # most profitable transaction
# Exchange rates
one_to_two = A[one_, two_]
two_to_one = A[two_, one_]
two_to_three = A[two_, three_]
three_to_two = A[three_, two_]
# Calculate money after trade and the profit obtained
#        [initial conversion]  [bounce between GBP and JPY]   [convert back to USD]
result = investment * one_to_two * (two_to_three*three_to_two)**t * two_to_one * fees**t
profit = result - investment
## VERIFY MOST REPEATED TRANSACTION IS MOST PROFITABLE TRANSACTION
assert two_to_three*three_to_two == np.max(luis)
print("Summary:\n"
f"Initial investment of: ${investment}\n"
f"Total number of iterations: {t}\n"
f"Percentage of total money lost to fees: {np.round((1-fees)*100)}%\n"
f"Money after trade was concluded: ${result}\n"
f"Lost to fees: ${investment*(1-fees)}\n"
f"Final profit: ${profit}")
#####################################################################################
###Output
Summary:
Initial investment of: $100
Total number of iterations: 10
Percentage of total money lost to fees: 0%
Money after trade was concluded: $125.22996508142167
Lost to fees: $0
Final profit: $25.229965081421668
|
python/practice/1-4_quiz2.ipynb | ###Markdown
Problem 1
Print the natural numbers up to 20 that leave a remainder of 1 when divided by 3.
###Code
# Write your answer here.
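# One possible solution (a sketch that reproduces the output below):
for i in range(1, 21):
    if i % 3 == 1:
        print(i)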
###Output
1
4
7
10
13
16
19
###Markdown
Problem 2
Print the following pattern: \* \*\* \*\*\* \*\*\*\* \*\*\*\*\*
###Code
# Write your answer here.
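# One possible solution (a sketch that reproduces the output below):
for i in range(1, 6):
    print('*' * i)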
###Output
*
**
***
****
*****
###Markdown
Problem 3
To protect customers' personal information, de-identify their names before printing. Example) 홍길동 → 홍\*동
###Code
names = ['홍길동', '홍계월', '김철수', '이영희', '박첨지']
# Write your answer here.
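# One possible solution (a sketch that reproduces the output below;
# it assumes three-character names and masks the middle character):
for name in names:
    print(name[0] + '*' + name[2:])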
###Output
홍*동
홍*월
김*수
이*희
박*지
|
5.2 Orthogonal Projections and Their Applications.ipynb | ###Markdown
Orthogonal Projections and Their Applications

1. Overview
Orthogonal projection is a cornerstone of vector space methods, with applications in:
- least squares projection (linear regression)
- conditional expectations of multivariate normal (Gaussian) distributions
- Gram-Schmidt orthogonalization
- QR decomposition
- orthogonal polynomials

2. Key Definitions
Assume $x,z\in\mathbb{R}^n$ and define $\langle x,z\rangle=\sum_i x_i z_i$. Recall that $\|x\|^2=\langle x,x\rangle$. The **law of cosines** gives $\langle x,z\rangle=\|x\|\ \|z\|\cos(\theta)$, where $\theta$ is the angle between the two vectors. If $\cos(\theta)=0$, we say that $x$ and $z$ are orthogonal, written $x\perp z$. If $x\perp z$ for all $z\in S$, then $x\perp S$.
**The orthogonal complement of a linear subspace $S\subset \mathbb R^n$ is the set** $S^{\perp}:=\{x\in \mathbb R^n:x\perp S\}$
**Orthogonal set** $x_1,...,x_k\subset\mathbb R^n$, where $x_i\perp x_j$ for $i\neq j$. By the Pythagorean theorem,
$$\|x_1+...+x_k\|^2=\|x_1\|^2+...+\|x_k\|^2$$
Linear Independence vs Orthogonality
If $X\subset \mathbb R^n$ is an orthogonal set and $0\not\in X$, then $X$ is linearly independent.

3. The Orthogonal Projection Theorem
Given a vector, we want to find the vector in a given linear subspace that best approximates it; the orthogonal projection solves this problem.
**Orthogonal projection theorem** Given $y\in\mathbb R^n$ and a linear subspace $S\subset \mathbb R^n$, the minimization problem
$$\hat y:=\arg\min_{z\in S}\|y-z\|$$
has a unique minimizer $\hat y$ satisfying
- $\hat y \in S$
- $y-\hat y \perp S$
The vector $\hat y$ is the orthogonal projection of $y$ onto $S$.
Proof of sufficiency
Let $\hat y$ be a vector with $\hat y\in S$ and $y-\hat y\perp S$, and let $z$ be any other vector in $S$. We can derive
$$\|y-z\|^2=\|(y-\hat y)+(\hat y -z)\|^2=\|y-\hat y\|^2+\|\hat y-z\|^2$$
and therefore $\|y-z\| \geq\|y-\hat y\|$.
Orthogonal Projection as a Mapping
Given a linear subspace $S$, for another linear space $Y$ we have
$$y\in Y \to \text{its orthogonal projection } \hat y\in S$$
There is a matrix $P$ such that
- $Py$ represents the projection $\hat y$
- $\hat E_S y=Py$, where $\hat E$ is a generalized expectation operator
$P$ is the orthogonal projection mapping onto $S$:
1. $Py\in S$
2. $y-Py\perp S$
Useful properties can be derived:
1. $\|y\|^2=\|Py\|^2+\|y-Py\|^2$
2. $\|Py\| \leq \|y\|$
Orthogonal Complement
**Definition** The orthogonal complement of $S$ is the linear subspace $S^\perp$ satisfying $x_1\perp x_2$ for all $x_1\in S$ and $x_2\in S^\perp$.
A linear space $Y$ containing a linear subspace $S$ and its orthogonal complement $S^\perp$ is written
$$Y=S\oplus S^\perp$$
For every $y\in Y$ there exist a unique $x_1\in S$ and a unique $x_2\in S^\perp$ such that $y=x_1+x_2$, namely $x_1=\hat E_S y$ and $x_2=y-\hat E_S y$, where $x_1$ is the orthogonal projection of $y$ onto $S$.
**Theorem** If $S$ is a linear subspace of $\mathbb R^n$, $\hat E_S y=Py$ and $\hat E_{S^\perp}y=My$, then
$$Py\perp My \ \text{and} \ y=Py+My \ \text{for all} \ y\in \mathbb R^n$$

4. Orthonormal Basis
**Orthonormal set** If $\|u\|=1$ for all $u\in O\subset \mathbb R^n$, the orthogonal set $O$ is an orthonormal set.
**Orthonormal basis** If $S$ is a linear subspace of $\mathbb R^n$, $O\subset S$, $O$ is orthonormal and $\text{span}\,O = S$, then $O$ is called an orthonormal basis of $S$.
**Combination** If $\{u_1,...,u_k\}$ is an orthonormal basis of the linear subspace $S$, then
$$x=\sum_{i=1}^k \langle x,u_i \rangle u_i \ \text{for all} \ x\in S$$
If $x\in \text{span}\{u_1,...,u_k\}$, we can find scalars $\alpha_1,...,\alpha_k$ such that
$$x=\sum_{j=1}^k \alpha_j u_j$$
The inner product is
$$\langle x,u_i \rangle=\sum_{j=1}^k \alpha_j \langle u_j, u_i \rangle = \alpha_i$$
Combining this with the expression above gives the stated conclusion.
Projection onto an Orthonormal Basis
**Theorem** If $\{u_1,...,u_k\}$ is an orthonormal basis of $S$, then
$$Py=\sum_{i=1}^k \langle y,u_i \rangle u_i, \ \forall y\in\mathbb R^n$$
**Proof** Let $y\in\mathbb R^n$ and define $Py$ as above. Clearly $Py\in S$, and $y-Py \perp S$ also holds, since $y-Py \perp u_i$ for each $i$:
$$\langle y-\sum_{i=1}^k \langle y,u_i \rangle u_i,u_j \rangle =\langle y,u_j \rangle -\sum_{i=1}^k \langle y,u_i\rangle\langle u_i,u_j\rangle=0$$

5. Projection Using Matrix Algebra
We want to compute the matrix $P$ such that
$$\hat E_S y=Py$$
Clearly $Py$ is a linear function of $y\in\mathbb R^n$.
**Theorem** If the columns of $X_{n\times k}$ form a basis of $S$, then
$$P=X(X'X)^{-1}X'$$
**Proof** Given an arbitrary $y\in\mathbb R^n$ and $P=X(X'X)^{-1}X'$, we show
1. $Py\in S$
2.
$y-Py\perp S$
Proof of 1:
$$Py=X(X'X)^{-1}X'y=Xa \ \text{where} \ a:=(X'X)^{-1}X'y$$
$Xa$ is a linear combination of the columns of $X$, hence an element of $S$.
Proof of 2:
$$y-X(X'X)^{-1}X'y\perp Xb \ \ \forall b\in\mathbb R^K$$
If $b\in\mathbb R^K$, then
$$(Xb)'[y-X(X'X)^{-1}X'y]=b'[X'y-X'y]=0$$
Starting with $X$
The columns of the matrix $X_{n\times k}$ are linearly independent, and
$$S:=\text{span} X:=\text{span}\{\text{col}_1X,...,\text{col}_kX\}$$
The columns of $X$ form a basis of $S$. $P=X(X'X)^{-1}X'$ projects $y$ onto $S$ and is therefore called the **projection matrix**. $M=I-P$ satisfies $My=\hat E_{S^\perp}y$ and is called the **annihilator matrix**.
The Orthonormal Case
Suppose $U_{n\times k}$ has orthonormal columns and let $S:=\text{span}\,U$. Projecting $y$ onto $S$:
$$Py=U(U'U)^{-1}U'y$$
Because $U_{n\times k}$ has orthonormal columns, $U'U=I$, and therefore
$$Py=UU'y=\sum_{i=1}^k\langle u_i,y\rangle u_i$$
Application: Overdetermined Systems of Equations
$X_{n\times k}$ is composed of linearly independent columns and $y\in\mathbb R^n$. Given $X$ and $y$, we try to find $b\in\mathbb R^k$ satisfying $Xb=y$. If $n>k$, $b$ is overdetermined; intuitively, we may not be able to find a $b$ that satisfies all $n$ equations. There are two approaches:
- accept that an exact solution may not exist
- seek an approximate solution ($Xb$ as close to $y$ as possible)
**Theorem** Minimizing $\|y-Xb\|$ yields
$$\hat \beta:=(X'X)^{-1}X'y$$
**Proof** Note that
$$X\hat\beta=X(X'X)^{-1}X'y=Py$$
$Py$ is the projection onto $\text{span}(X)$, and we have
$$\|y-Py\|\leq\|y-z\| \ \text{for any} \ z\in\text{span}(X)$$
Since $Xb\in\text{span}(X)$,
$$\|y-X\hat\beta\|\leq\|y-Xb\| \ \text{for any}\ b\in\mathbb R^K $$

6. Least Squares Regression
Squared risk measures
Given pairs $(x,y)\in\mathbb{R}^K\times\mathbb{R}$, consider choosing $f:\mathbb{R}^K\to\mathbb{R}$ to minimize the risk
$$R(f):=\mathbb{E}[(y-f(x))^2]$$
If the probability law (and hence $\mathbb E$) is unknown but a sample is available, we can estimate the **empirical risk**:
$$\min_{f\in\mathcal F}\frac{1}{N}\sum_{n=1}^N(y_n-f(x_n))^2$$
Minimizing this expression is called **empirical risk minimization**. Taking $\mathcal F$ to be the class of linear functions and dropping the constant $1/N$, the problem becomes the **linear least squares problem**:
$$\min_{b\in\mathbb R^K}\sum_{n=1}^N(y_n-b'x_n)^2$$
Solution
Form the matrix $X_{N\times K}$, where $N>K$ and $X$ has full rank. One can show that $\|y-Xb\|^2=\sum_{n=1}^N(y_n-b'x_n)^2$. Monotone transformations do not affect the minimizer, so
$$\arg\min_{b\in\mathbb R^K}\sum_{n=1}^N(y_n-b'x_n)^2=\arg\min_{b\in\mathbb R^K}\|y-Xb\|$$
The solution is
$$\hat \beta:=(X'X)^{-1}X'y$$
Let $P$ and $M$ be the projection and annihilator associated with $X$:
$$P:=X(X'X)^{-1}X' \ \text{and} \ M:=I-P$$
The vector of fitted values:
$$\hat y:=X\hat\beta=Py$$
The vector of residuals:
$$\hat u:=y-\hat y=y-Py=My$$

7. Orthogonalization and Decomposition
Next we explore the relationship between linear independence and orthogonalization.
Gram-Schmidt Orthogonalization
**Theorem** For every linearly independent set $\{x_1,...,x_k\}\subset\mathbb R^n$, there exists an orthonormal set $\{u_1,...,u_k\}$ with
$$\text{span}\{x_1,...,x_i\}=\text{span}\{u_1,...,u_i\}\ \text{for} \ i=1,...,k$$
The **Gram-Schmidt orthogonalization** procedure for creating the orthonormal set:
- 1. Set $v_1=x_1$
- 2. For $i\geq 2$, set $v_i:=\hat E_{S_{i-1}^\perp}x_i$ and $u_i:=v_i/\|v_i\|$
QR Decomposition
**Theorem** For a matrix $X_{n\times k}$ with linearly independent columns, there exists a factorization $X=QR$, where:
- $R_{k\times k}$ is upper triangular and nonsingular
- $Q_{n\times k}$ has orthonormal columns
Linear Regression via QR Decomposition
$$y=X\beta$$
$$\hat\beta=(X'X)^{-1}X'y$$
$$X=QR$$
$$\begin{aligned}\hat\beta & =(R'Q'QR)^{-1}R'Q'y \\& = (R'R)^{-1}R'Q'y \\& = R^{-1}(R')^{-1}R'Q'y=R^{-1}Q'y\end{aligned}$$

8. Exercises
Exercise 3
Using Gram-Schmidt orthogonalization, the projection matrix $P:=X(X'X)^{-1}X'$, and the QR decomposition, solve for the linear projection of $y$ onto the column space of $X$, where
$y:=\begin{pmatrix}1 \\3\\-3\end{pmatrix}$, $X:=\begin{pmatrix}1 & 0 \\0 & -6 \\2 & 2\end{pmatrix}$
###Code
import numpy as np
def gram_schmidt(X):
"""
Implements Gram-Schmidt orthogonalization.
Parameters
----------
X : an n x k array with linearly independent columns
Returns
-------
U : an n x k array with orthonormal columns
"""
# Set up
n, k = X.shape
U = np.empty((n, k))
I = np.eye(n)
# The first col of U is just the normalized first col of X
v1 = X[:,0]
U[:, 0] = v1 / np.sqrt(np.sum(v1 * v1))
for i in range(1, k):
# Set up
b = X[:,i] # The vector we're going to project
Z = X[:, 0:i] # first i-1 columns of X
# Project onto the orthogonal complement of the col span of Z
M = I - Z @ np.linalg.inv(Z.T @ Z) @ Z.T # M=I-Z(Z'Z)^{-1}Z'
u = M @ b
# Normalize
U[:,i] = u / np.sqrt(np.sum(u * u))
return U
###Output
_____no_output_____
###Markdown
Plug in the vector and matrix
###Code
y = [1, 3, -3]
X = [[1, 0],
[0, -6],
[2, 2]]
X, y = [np.asarray(z) for z in (X, y)]
###Output
_____no_output_____
###Markdown
Projection of y onto the column space of X using the plain matrix formula
###Code
Py1 = X @ np.linalg.inv(X.T @ X) @ X.T @ y
Py1
###Output
_____no_output_____
###Markdown
Projection of y onto the column space of X using Gram-Schmidt orthogonalization
###Code
U = gram_schmidt(X)
U
Py2 = U @ U.T @ y
Py2
###Output
_____no_output_____
###Markdown
The results match! Now verify with the QR decomposition
###Code
from scipy.linalg import qr
Q, R = qr(X, mode='economic')
Q
Py3 = Q @ Q.T @ y
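# (Sketch) All three approaches compute the same projection:
# np.allclose(Py1, Py2) and np.allclose(Py2, Py3) both evaluate to True.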
Py3
###Output
_____no_output_____ |
sber_swap_demo.ipynb | ###Markdown
GitHub: https://github.com/sberbank-ai/sber-swap/tree/main/models
Paper: https://habr.com/ru/company/sberbank/blog/645919/
Environment setup
Check the GPU
###Code
!nvidia-smi
!nvcc --version
###Output
_____no_output_____
###Markdown
Clone the code from GitHub
###Code
%cd /content/
!git clone https://github.com/sberbank-ai/sber-swap.git
%cd sber-swap
###Output
_____no_output_____
###Markdown
Download the pretrained models
###Code
# Download arcface
!wget -P ./arcface_model https://github.com/sberbank-ai/sber-swap/releases/download/arcface/backbone.pth
!wget -P ./arcface_model https://github.com/sberbank-ai/sber-swap/releases/download/arcface/iresnet.py
# Download the landmarks detector
!wget -P ./insightface_func/models/antelope https://github.com/sberbank-ai/sber-swap/releases/download/antelope/glintr100.onnx
!wget -P ./insightface_func/models/antelope https://github.com/sberbank-ai/sber-swap/releases/download/antelope/scrfd_10g_bnkps.onnx
# Download SberSwap
!wget -P ./weights https://github.com/sberbank-ai/sber-swap/releases/download/sber-swap-v2.0/G_unet_2blocks.pth
# Download SuperResolution
!wget -P ./weights https://github.com/sberbank-ai/sber-swap/releases/download/super-res/10_net_G.pth
###Output
_____no_output_____
###Markdown
Install the libraries
###Code
!pip install mxnet-cu101mkl
!pip install onnxruntime-gpu==1.8
!pip install insightface==0.2.1
!pip install kornia==0.5.4
###Output
_____no_output_____
###Markdown
Import the libraries
###Code
import cv2
import torch
import time
import os
from utils.inference.image_processing import crop_face, get_final_image, show_images
from utils.inference.video_processing import read_video, get_target, get_final_video, add_audio_from_another_video, face_enhancement
from utils.inference.core import model_inference
from network.AEI_Net import AEI_Net
from coordinate_reg.image_infer import Handler
from insightface_func.face_detect_crop_multi import Face_detect_crop
from arcface_model.iresnet import iresnet100
from models.pix2pix_model import Pix2PixModel
from models.config_sr import TestOptions
from google.colab import files
###Output
_____no_output_____
###Markdown
Build and load the models
###Code
# Initialization
app = Face_detect_crop(name='antelope', root='./insightface_func/models')
app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
# Build the model
G = AEI_Net(backbone='unet', num_blocks=2, c_id=512)
G.eval()
G.load_state_dict(torch.load('weights/G_unet_2blocks.pth', map_location=torch.device('cpu')))
G = G.cuda()
G = G.half()
# Load the face embedding network (arcface)
netArc = iresnet100(fp16=False)
netArc.load_state_dict(torch.load('arcface_model/backbone.pth'))
netArc=netArc.cuda()
netArc.eval()
# Load the face landmarks model
handler = Handler('./coordinate_reg/model/2d106det', 0, ctx_id=0, det_size=640)
# use_sr=True -> super-resolution (higher-resolution output)
use_sr = True
if use_sr:
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
torch.backends.cudnn.benchmark = True
opt = TestOptions()
#opt.which_epoch ='10_7'
model = Pix2PixModel(opt)
model.netG.train()
###Output
_____no_output_____
###Markdown
FaceSwap image setup
###Code
%cd /content/sber-swap
#@markdown Choose whether to upload your own files
#@markdown or use the bundled samples.
#@markdown If uploading, first upload the image of the face you want to use,
#@markdown then upload the image or video you want to paste that face onto.
setting_type = 'upload' #@param ["upload", "sample"]
#@markdown Choose whether to swap the face in a video or in an image.
target_type = 'image' #@param ["video", "image"]
source_path = 'examples/images/elon_musk.jpg'
target_path = 'examples/images/beckham.jpg'
path_to_video = 'examples/videos/nggyup.mp4'
if setting_type == 'upload':
    # Upload the source image first
upload_src = files.upload()
upload_src = list(upload_src.keys())
source_path = upload_src[0]
upload_target = files.upload()
upload_target = list(upload_target.keys())
if target_type=='image':
target_path = upload_target[0]
else:
path_to_video = upload_target[0]
source_full = cv2.imread(source_path)
OUT_VIDEO_NAME = "examples/results/result.mp4"
crop_size = 224
try:
source = crop_face(source_full, app, crop_size)[0]
source = [source[:, :, ::-1]]
print("Everything is ok!")
except TypeError:
print("Bad source images")
# read video
if target_type == 'image':
target_full = cv2.imread(target_path)
full_frames = [target_full]
else:
full_frames, fps = read_video(path_to_video)
target = get_target(full_frames, app, crop_size)
###Output
_____no_output_____
###Markdown
Run FaceSwap
Source -> the face that will be used
Target -> the image or video the Source face is applied to
###Code
#@markdown #**Inference**
batch_size = 40#@param {type:"integer"}
START_TIME = time.time()
final_frames_list, crop_frames_list,full_frames, tfm_array_list = model_inference(
full_frames,
source,
target,
netArc,
G,
app,
set_target = False,
crop_size=crop_size,
BS=batch_size
)
if use_sr:
final_frames_list = face_enhancement(final_frames_list, model)
if target_type == 'video':
get_final_video(final_frames_list,
crop_frames_list,
full_frames,
tfm_array_list,
OUT_VIDEO_NAME,
fps,
handler)
add_audio_from_another_video(path_to_video, OUT_VIDEO_NAME, "audio")
print(f'Full pipeline took {time.time() - START_TIME}')
print(f"Video saved with path {OUT_VIDEO_NAME}")
else:
result = get_final_image(final_frames_list, crop_frames_list, full_frames[0], tfm_array_list, handler)
cv2.imwrite('examples/results/result.png', result)
###Output
_____no_output_____
###Markdown
Display the FaceSwap results
###Code
import matplotlib.pyplot as plt
if target_type == 'image':
show_images(
[source[0][:, :, ::-1], target_full, result],
['Source Image', 'Target Image', 'Swapped Image'],
figsize=(20, 15))
from moviepy.editor import *
clip = VideoFileClip(OUT_VIDEO_NAME)
clip.ipython_display()
###Output
_____no_output_____ |
site/en/r1/tutorials/load_data/images.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Load images with tf.data Run in Google Colab View source on GitHub This tutorial provides a simple example of how to load an image dataset using `tf.data`.The dataset used in this example is distributed as directories of images, with one class of image per directory. Setup
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.enable_eager_execution()
tf.__version__
AUTOTUNE = tf.data.experimental.AUTOTUNE
###Output
_____no_output_____
###Markdown
Download and inspect the dataset
Retrieve the images
Before you start any training, you'll need a set of images to teach the network about the new classes you want to recognize. We have created an archive of creative-commons licensed flower photos that you can use initially.
###Code
import pathlib
data_root_orig = tf.keras.utils.get_file('flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
###Output
_____no_output_____
###Markdown
After downloading 218MB, you should now have a copy of the flower photos available:
###Code
for item in data_root.iterdir():
print(item)
import random
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
all_image_paths[:10]
###Output
_____no_output_____
###Markdown
Inspect the images
Now let's have a quick look at a couple of the images, so you know what you're dealing with:
###Code
import os
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
import IPython.display as display
def caption_image(image_path):
image_rel = pathlib.Path(image_path).relative_to(data_root)
return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1])
for n in range(3):
image_path = random.choice(all_image_paths)
display.display(display.Image(image_path))
print(caption_image(image_path))
print()
###Output
_____no_output_____
###Markdown
Determine the label for each image List the available labels:
###Code
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
###Output
_____no_output_____
###Markdown
Assign an index to each label:
###Code
label_to_index = dict((name, index) for index,name in enumerate(label_names))
label_to_index
###Output
_____no_output_____
###Markdown
Create a list of every file, and its label index
###Code
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
###Output
_____no_output_____
###Markdown
Load and format the images TensorFlow includes all the tools you need to load and process images:
###Code
img_path = all_image_paths[0]
img_path
###Output
_____no_output_____
###Markdown
here is the raw data:
###Code
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
###Output
_____no_output_____
###Markdown
Decode it into an image tensor:
###Code
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
###Output
_____no_output_____
###Markdown
Resize it for your model:
###Code
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
###Output
_____no_output_____
###Markdown
Wrap these up in simple functions for later.
###Code
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [192, 192])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
import matplotlib.pyplot as plt
img_path = all_image_paths[0]
label = all_image_labels[0]
plt.imshow(load_and_preprocess_image(img_path))
plt.grid(False)
plt.xlabel(caption_image(img_path).encode('utf-8'))
plt.title(label_names[label].title())
print()
###Output
_____no_output_____
###Markdown
Build a `tf.data.Dataset` A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method. Slicing the array of strings results in a dataset of strings:
###Code
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
###Output
_____no_output_____
###Markdown
The `output_shapes` and `output_types` fields describe the content of each item in the dataset. In this case it is a set of scalar binary strings.
###Code
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds)
###Output
_____no_output_____
###Markdown
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
###Code
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n,image in enumerate(image_ds.take(4)):
plt.subplot(2,2,n+1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
###Output
_____no_output_____
###Markdown
A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels
###Code
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
for label in label_ds.take(10):
print(label_names[label.numpy()])
###Output
_____no_output_____
###Markdown
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs.
###Code
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
###Output
_____no_output_____
###Markdown
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
###Code
print(image_label_ds)
###Output
_____no_output_____
###Markdown
Note: When you have arrays like `all_image_labels` and `all_image_paths`, an alternative to using `tf.data.Dataset.zip` is slicing the pair of arrays.
###Code
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
return load_and_preprocess_image(path), label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
###Output
_____no_output_____
###Markdown
Basic methods for training To train a model with this dataset you will want the data:* To be well shuffled.* To be batched.* To repeat forever.* To have batches available as soon as possible. These features can be easily added using the `tf.data` API.
###Code
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
There are a few things to note here:1. The order is important. * A `.shuffle` *after* a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all). * A `.shuffle` *after* a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.1. Use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.This last point, as well as the order of `.shuffle` and `.repeat`, can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
* For more on ordering the operations, see [Repeat and Shuffle](https://www.tensorflow.org/r1/guide/performance/datasets#repeat_and_shuffle) in the Input Pipeline Performance guide. Pipe the dataset to a model Fetch a copy of MobileNet v2 from `tf.keras.applications`. This will be used for a simple transfer learning example. Set the MobileNet weights to be non-trainable:
###Code
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
###Output
_____no_output_____
###Markdown
This model expects its input to be normalized to the `[-1,1]` range: ```help(keras_applications.mobilenet_v2.preprocess_input)``` ...This function applies the "Inception" preprocessing which converts the RGB values from [0, 255] to [-1, 1]... So before passing data to the MobileNet model, you need to convert the input from a range of `[0,1]` to `[-1,1]`.
###Code
def change_range(image,label):
return 2*image-1, label
keras_ds = ds.map(change_range)
###Output
_____no_output_____
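###Markdown
Equivalently — a minimal sketch, and only valid if the pipeline still carried raw `[0, 255]` pixel values — the helper bundled with the model applies the same scaling, so `change_range` above could be replaced:
###Code
# Hypothetical alternative to change_range, for illustration only.
# mobilenet_v2.preprocess_input expects inputs in [0, 255], so the
# earlier division by 255 is undone before calling it.
def mobilenet_range(image, label):
    return tf.keras.applications.mobilenet_v2.preprocess_input(image * 255.0), label

# keras_ds = ds.map(mobilenet_range)  # same effect as change_range above
###Output
_____no_output_____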
###Markdown
The MobileNet returns a `6x6` spatial grid of features for each image. Pass it a batch of images to see:
###Code
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
###Output
_____no_output_____
###Markdown
Because of this output shape, build a model wrapped around MobileNet using `tf.keras.layers.GlobalAveragePooling2D` to average over the spatial dimensions before the output `tf.keras.layers.Dense` layer:
###Code
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(label_names), activation = 'softmax')])
###Output
_____no_output_____
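###Markdown
To see what the pooling layer does to the `6x6` grid — a quick sketch reusing `feature_map_batch` from above — compare the shapes before and after:
###Code
# GlobalAveragePooling2D collapses the two spatial dimensions,
# leaving one feature vector per image.
pooled = tf.keras.layers.GlobalAveragePooling2D()(feature_map_batch)
print(feature_map_batch.shape, '->', pooled.shape)
###Output
_____no_output_____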
###Markdown
Now it produces outputs of the expected shape:
###Code
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
###Output
_____no_output_____
###Markdown
Compile the model to describe the training procedure:
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
There are 2 trainable variables: the Dense `weights` and `bias`:
###Code
len(model.trainable_variables)
model.summary()
###Output
_____no_output_____
###Markdown
Train the model. Normally you would specify the real number of steps per epoch, but for demonstration purposes only run 3 steps.
###Code
steps_per_epoch=tf.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
###Output
_____no_output_____
###Markdown
Performance Note: This section just shows a couple of easy tricks that may help performance. For an in-depth guide see [Input Pipeline Performance](https://www.tensorflow.org/r1/guide/performance/datasets). The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU but may not be sufficient for GPU training, and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets:
###Code
import time
def timeit(ds, batches=2*steps_per_epoch+1):
overall_start = time.time()
# Fetch a single batch to prime the pipeline (fill the shuffle buffer),
# before starting the timer
it = iter(ds.take(batches+1))
next(it)
start = time.time()
for i,(images,labels) in enumerate(it):
if i%10 == 0:
print('.',end='')
print()
end = time.time()
duration = end-start
print("{} batches: {} s".format(batches, duration))
print("{:0.5f} Images/s".format(BATCH_SIZE*batches/duration))
print("Total time: {}s".format(end-overall_start))
###Output
_____no_output_____
###Markdown
The performance of the current dataset is:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Cache Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is especially performant if the data fits in memory. Here the images are cached after being pre-processed (decoded and resized):
###Code
ds = image_label_ds.cache()
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
One disadvantage of using an in-memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
If the data doesn't fit in memory, use a cache file:
###Code
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
TFRecord File Raw image data TFRecord files are a simple format for storing a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS. First, build a TFRecord file from the raw image data:
###Code
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
###Output
_____no_output_____
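###Markdown
As a quick sanity check — a minimal sketch — count the records written to the file; the total should match `image_count`:
###Code
# Each element of a TFRecordDataset is one serialized record,
# here one raw image string per record.
num_records = sum(1 for _ in tf.data.TFRecordDataset('images.tfrec'))
print(num_records, image_count)
###Output
_____no_output_____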
###Markdown
Next build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier.
###Code
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
###Output
_____no_output_____
###Markdown
Zip that with the labels dataset you defined earlier, to get the expected `(image,label)` pairs.
###Code
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
This is slower than the `cache` version because you have not cached the preprocessing. Serialized Tensors To bake the preprocessing into the TFRecord file, first make a dataset of the processed images, as before:
###Code
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
###Output
_____no_output_____
###Markdown
Now instead of a dataset of `.jpeg` strings, this is a dataset of tensors. To serialize this to a TFRecord file, you first convert the dataset of tensors to a dataset of strings.
###Code
ds = image_ds.map(tf.serialize_tensor)
ds
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
###Output
_____no_output_____
###Markdown
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently. Just remember to de-serialize the tensor before trying to use it.
###Code
ds = tf.data.TFRecordDataset('images.tfrec')
def parse(x):
result = tf.parse_tensor(x, out_type=tf.float32)
result = tf.reshape(result, [192, 192, 3])
return result
ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
Now, add the labels and apply the same standard operations as before:
###Code
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Load images with tf.data This tutorial provides a simple example of how to load an image dataset using `tf.data`. The dataset used in this example is distributed as directories of images, with one class of image per directory. Setup
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.enable_eager_execution()
tf.__version__
AUTOTUNE = tf.data.experimental.AUTOTUNE
###Output
_____no_output_____
###Markdown
Download and inspect the dataset Retrieve the images Before you start any training, you'll need a set of images to teach the network about the new classes you want to recognize. We've created an archive of creative-commons licensed flower photos to use initially.
###Code
import pathlib
data_root_orig = tf.keras.utils.get_file('flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
###Output
_____no_output_____
###Markdown
After downloading 218MB, you should now have a copy of the flower photos available:
###Code
for item in data_root.iterdir():
print(item)
import random
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
all_image_paths[:10]
###Output
_____no_output_____
###Markdown
Inspect the images Now let's have a quick look at a couple of the images, so you know what you're dealing with:
###Code
import os
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
import IPython.display as display
def caption_image(image_path):
image_rel = pathlib.Path(image_path).relative_to(data_root)
return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1])
for n in range(3):
image_path = random.choice(all_image_paths)
display.display(display.Image(image_path))
print(caption_image(image_path))
print()
###Output
_____no_output_____
###Markdown
Determine the label for each image List the available labels:
###Code
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
###Output
_____no_output_____
###Markdown
Assign an index to each label:
###Code
label_to_index = dict((name, index) for index,name in enumerate(label_names))
label_to_index
###Output
_____no_output_____
###Markdown
Create a list of every file, and its label index
###Code
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
###Output
_____no_output_____
###Markdown
Load and format the images TensorFlow includes all the tools you need to load and process images:
###Code
img_path = all_image_paths[0]
img_path
###Output
_____no_output_____
###Markdown
Here is the raw data:
###Code
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
###Output
_____no_output_____
###Markdown
Decode it into an image tensor:
###Code
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
###Output
_____no_output_____
###Markdown
Resize it for your model:
###Code
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
###Output
_____no_output_____
###Markdown
Wrap these up in simple functions for later.
###Code
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [192, 192])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
import matplotlib.pyplot as plt
img_path = all_image_paths[0]
label = all_image_labels[0]
plt.imshow(load_and_preprocess_image(img_path))
plt.grid(False)
plt.xlabel(caption_image(img_path).encode('utf-8'))
plt.title(label_names[label].title())
print()
###Output
_____no_output_____
###Markdown
Build a `tf.data.Dataset` A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method. Slicing the array of strings results in a dataset of strings:
###Code
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
###Output
_____no_output_____
###Markdown
The `output_shapes` and `output_types` fields describe the content of each item in the dataset. In this case it is a set of scalar binary strings.
###Code
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds)
###Output
_____no_output_____
###Markdown
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
###Code
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n,image in enumerate(image_ds.take(4)):
plt.subplot(2,2,n+1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
###Output
_____no_output_____
###Markdown
A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels
###Code
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
for label in label_ds.take(10):
print(label_names[label.numpy()])
###Output
_____no_output_____
###Markdown
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs.
###Code
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
###Output
_____no_output_____
###Markdown
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
###Code
print(image_label_ds)
###Output
_____no_output_____
###Markdown
Note: When you have arrays like `all_image_labels` and `all_image_paths`, an alternative to using `tf.data.Dataset.zip` is slicing the pair of arrays.
###Code
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
return load_and_preprocess_image(path), label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
###Output
_____no_output_____
###Markdown
Basic methods for training To train a model with this dataset you will want the data:* To be well shuffled.* To be batched.* To repeat forever.* To have batches available as soon as possible. These features can be easily added using the `tf.data` API.
###Code
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
There are a few things to note here:1. The order is important. * A `.shuffle` *after* a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all). * A `.shuffle` *after* a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.1. Use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.This last point, as well as the order of `.shuffle` and `.repeat`, can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
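###Markdown
For contrast — a sketch of the orderings the notes above warn against, left commented out so the fused `ds` built above is what the rest of the notebook uses:
###Code
# Counter-examples, for illustration only (do not use):
# ds_bad = image_label_ds.repeat().shuffle(buffer_size=image_count)  # leaks items across epoch boundaries
# ds_bad = image_label_ds.batch(BATCH_SIZE).shuffle(buffer_size=16)  # shuffles whole batches, not items
###Output
_____no_output_____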
###Markdown
* For more on ordering the operations, see [Repeat and Shuffle](https://www.tensorflow.org/r1/guide/performance/datasets#repeat_and_shuffle) in the Input Pipeline Performance guide. Pipe the dataset to a model Fetch a copy of MobileNet v2 from `tf.keras.applications`. This will be used for a simple transfer learning example. Set the MobileNet weights to be non-trainable:
###Code
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
###Output
_____no_output_____
###Markdown
This model expects its input to be normalized to the `[-1,1]` range: ```help(keras_applications.mobilenet_v2.preprocess_input)``` ...This function applies the "Inception" preprocessing which converts the RGB values from [0, 255] to [-1, 1]... So before passing data to the MobileNet model, you need to convert the input from a range of `[0,1]` to `[-1,1]`.
###Code
def change_range(image,label):
return 2*image-1, label
keras_ds = ds.map(change_range)
###Output
_____no_output_____
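###Markdown
A quick check — a minimal sketch — that the remapped batches really land in `[-1, 1]`:
###Code
# Creating an iterator fills the shuffle buffer, so this may take a few seconds.
check_images, _ = next(iter(keras_ds))
print(check_images.numpy().min(), check_images.numpy().max())
###Output
_____no_output_____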
###Markdown
The MobileNet returns a `6x6` spatial grid of features for each image. Pass it a batch of images to see:
###Code
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
###Output
_____no_output_____
###Markdown
Because of this output shape, build a model wrapped around MobileNet using `tf.keras.layers.GlobalAveragePooling2D` to average over the spatial dimensions before the output `tf.keras.layers.Dense` layer:
###Code
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(label_names), activation = 'softmax')])
###Output
_____no_output_____
###Markdown
Now it produces outputs of the expected shape:
###Code
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
###Output
_____no_output_____
###Markdown
Compile the model to describe the training procedure:
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
There are 2 trainable variables: the Dense `weights` and `bias`:
###Code
len(model.trainable_variables)
model.summary()
###Output
_____no_output_____
###Markdown
Train the model. Normally you would specify the real number of steps per epoch, but for demonstration purposes only run 3 steps.
###Code
steps_per_epoch=tf.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
###Output
_____no_output_____
###Markdown
Performance Note: This section just shows a couple of easy tricks that may help performance. For an in-depth guide see [Input Pipeline Performance](https://www.tensorflow.org/r1/guide/performance/datasets). The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU but may not be sufficient for GPU training, and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets:
###Code
import time
def timeit(ds, batches=2*steps_per_epoch+1):
overall_start = time.time()
# Fetch a single batch to prime the pipeline (fill the shuffle buffer),
# before starting the timer
it = iter(ds.take(batches+1))
next(it)
start = time.time()
for i,(images,labels) in enumerate(it):
if i%10 == 0:
print('.',end='')
print()
end = time.time()
duration = end-start
print("{} batches: {} s".format(batches, duration))
print("{:0.5f} Images/s".format(BATCH_SIZE*batches/duration))
print("Total time: {}s".format(end-overall_start))
###Output
_____no_output_____
###Markdown
The performance of the current dataset is:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Cache Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is especially performant if the data fits in memory. Here the images are cached after being pre-processed (decoded and resized):
###Code
ds = image_label_ds.cache()
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
One disadvantage of using an in-memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
If the data doesn't fit in memory, use a cache file:
###Code
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
###Code
timeit(ds)
###Output
_____no_output_____
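###Markdown
One caveat — a sketch, assuming the shard files written above share the `./cache.tf-data*` prefix — a cache file only matches the exact pipeline that produced it, so delete the shards whenever the preprocessing changes:
###Code
import glob, os

# The cache is not reused below, so it is safe to remove the shards here.
for f in glob.glob('./cache.tf-data*'):
    os.remove(f)
###Output
_____no_output_____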
###Markdown
TFRecord File Raw image data TFRecord files are a simple format for storing a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS. First, build a TFRecord file from the raw image data:
###Code
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
###Output
_____no_output_____
###Markdown
Next build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier.
###Code
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
###Output
_____no_output_____
###Markdown
Zip that with the labels dataset you defined earlier, to get the expected `(image,label)` pairs.
###Code
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
This is slower than the `cache` version because you have not cached the preprocessing. Serialized Tensors To bake the preprocessing into the TFRecord file, first make a dataset of the processed images, as before:
###Code
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
###Output
_____no_output_____
###Markdown
Now instead of a dataset of `.jpeg` strings, this is a dataset of tensors. To serialize this to a TFRecord file, you first convert the dataset of tensors to a dataset of strings.
###Code
ds = image_ds.map(tf.serialize_tensor)
ds
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
###Output
_____no_output_____
###Markdown
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently. Just remember to de-serialize the tensor before trying to use it.
###Code
ds = tf.data.TFRecordDataset('images.tfrec')
def parse(x):
result = tf.parse_tensor(x, out_type=tf.float32)
result = tf.reshape(result, [192, 192, 3])
return result
ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
###Output
_____no_output_____
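###Markdown
A peek at one parsed element — a minimal sketch — confirms the de-serialized shape and dtype:
###Code
# Should print (192, 192, 3) float32, matching the reshape above.
for tensor in ds.take(1):
    print(tensor.shape, tensor.dtype)
###Output
_____no_output_____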
###Markdown
Now, add the labels and apply the same standard operations as before:
###Code
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Load images with tf.data This tutorial provides a simple example of how to load an image dataset using `tf.data`. The dataset used in this example is distributed as directories of images, with one class of image per directory. Setup
###Code
import tensorflow.compat.v1 as tf
tf.__version__
AUTOTUNE = tf.data.experimental.AUTOTUNE
###Output
_____no_output_____
###Markdown
Download and inspect the dataset Retrieve the images Before you start any training, you'll need a set of images to teach the network about the new classes you want to recognize. We've created an archive of creative-commons licensed flower photos to use initially.
###Code
import pathlib
data_root_orig = tf.keras.utils.get_file('flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
###Output
_____no_output_____
###Markdown
After downloading 218MB, you should now have a copy of the flower photos available:
###Code
for item in data_root.iterdir():
print(item)
import random
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
all_image_paths[:10]
###Output
_____no_output_____
###Markdown
Inspect the images Now let's have a quick look at a couple of the images, so you know what you're dealing with:
###Code
import os
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
import IPython.display as display
def caption_image(image_path):
image_rel = pathlib.Path(image_path).relative_to(data_root)
return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1])
for n in range(3):
image_path = random.choice(all_image_paths)
display.display(display.Image(image_path))
print(caption_image(image_path))
print()
###Output
_____no_output_____
###Markdown
Determine the label for each image List the available labels:
###Code
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
###Output
_____no_output_____
###Markdown
Assign an index to each label:
###Code
label_to_index = dict((name, index) for index,name in enumerate(label_names))
label_to_index
###Output
_____no_output_____
###Markdown
Create a list of every file, and its label index
###Code
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
###Output
_____no_output_____
###Markdown
Load and format the images TensorFlow includes all the tools you need to load and process images:
###Code
img_path = all_image_paths[0]
img_path
###Output
_____no_output_____
###Markdown
Here is the raw data:
###Code
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
###Output
_____no_output_____
###Markdown
Decode it into an image tensor:
###Code
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
###Output
_____no_output_____
###Markdown
Resize it for your model:
###Code
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
###Output
_____no_output_____
###Markdown
Wrap these up in simple functions for later.
###Code
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [192, 192])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
import matplotlib.pyplot as plt
img_path = all_image_paths[0]
label = all_image_labels[0]
plt.imshow(load_and_preprocess_image(img_path))
plt.grid(False)
plt.xlabel(caption_image(img_path).encode('utf-8'))
plt.title(label_names[label].title())
print()
###Output
_____no_output_____
###Markdown
Build a `tf.data.Dataset` A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method. Slicing the array of strings results in a dataset of strings:
###Code
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
###Output
_____no_output_____
###Markdown
The `output_shapes` and `output_types` fields describe the content of each item in the dataset. In this case it is a set of scalar binary strings.
###Code
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds)
###Output
_____no_output_____
###Markdown
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
###Code
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n,image in enumerate(image_ds.take(4)):
plt.subplot(2,2,n+1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
###Output
_____no_output_____
###Markdown
A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels
###Code
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
for label in label_ds.take(10):
print(label_names[label.numpy()])
###Output
_____no_output_____
###Markdown
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs.
###Code
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
###Output
_____no_output_____
###Markdown
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
###Code
print(image_label_ds)
###Output
_____no_output_____
###Markdown
Note: When you have arrays like `all_image_labels` and `all_image_paths`, an alternative to using `tf.data.Dataset.zip` is slicing the pair of arrays.
###Code
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
return load_and_preprocess_image(path), label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
###Output
_____no_output_____
###Markdown
Basic methods for training To train a model with this dataset you will want the data:* To be well shuffled.* To be batched.* To repeat forever.* To have batches available as soon as possible. These features can be easily added using the `tf.data` API.
###Code
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
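###Markdown
To see what the pipeline now yields — a minimal sketch, assuming eager execution is active (the default when `compat.v1` runs under TF2) — pull a single batch and inspect its shapes:
###Code
# The first pull fills the shuffle buffer, so it can take a few seconds.
batch_images, batch_labels = next(iter(ds))
print(batch_images.shape, batch_labels.shape)
###Output
_____no_output_____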
###Markdown
There are a few things to note here:1. The order is important. * A `.shuffle` *after* a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all). * A `.shuffle` *after* a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.1. Use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.This last point, as well as the order of `.shuffle` and `.repeat`, can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
* For more on ordering the operations, see [Repeat and Shuffle](https://www.tensorflow.org/r1/guide/performance/datasets#repeat_and_shuffle) in the Input Pipeline Performance guide. Pipe the dataset to a model Fetch a copy of MobileNet v2 from `tf.keras.applications`. This will be used for a simple transfer learning example. Set the MobileNet weights to be non-trainable:
###Code
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
###Output
_____no_output_____
###Markdown
This model expects its input to be normalized to the `[-1,1]` range: ```help(keras_applications.mobilenet_v2.preprocess_input)``` ...This function applies the "Inception" preprocessing which converts the RGB values from [0, 255] to [-1, 1]... So before passing data to the MobileNet model, you need to convert the input from a range of `[0,1]` to `[-1,1]`.
###Code
def change_range(image,label):
return 2*image-1, label
keras_ds = ds.map(change_range)
###Output
_____no_output_____
###Markdown
The MobileNet returns a `6x6` spatial grid of features for each image. Pass it a batch of images to see:
###Code
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
###Output
_____no_output_____
###Markdown
Because of this output shape, build a model wrapped around MobileNet using `tf.keras.layers.GlobalAveragePooling2D` to average over the spatial dimensions before the output `tf.keras.layers.Dense` layer:
###Code
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(label_names), activation = 'softmax')])
###Output
_____no_output_____
###Markdown
Now it produces outputs of the expected shape:
###Code
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
###Output
_____no_output_____
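###Markdown
Because the final layer is a softmax, each row of `logit_batch` should sum to one — a quick sketch to confirm:
###Code
# Softmax outputs are probabilities, so every row sums to ~1.0.
print(logit_batch.sum(axis=-1)[:3])
###Output
_____no_output_____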
###Markdown
Compile the model to describe the training procedure:
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=["accuracy"])
###Output
_____no_output_____
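###Markdown
If this notebook is ever migrated off `compat.v1` — a sketch for reference only, not part of this TF1 run — the Keras-native optimizer is a drop-in replacement:
###Code
# TF2-style equivalent, shown for comparison only:
# model.compile(optimizer=tf.keras.optimizers.Adam(),
#               loss=tf.keras.losses.sparse_categorical_crossentropy,
#               metrics=["accuracy"])
###Output
_____no_output_____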
###Markdown
There are 2 trainable variables: the Dense `weights` and `bias`:
###Code
len(model.trainable_variables)
model.summary()
###Output
_____no_output_____
###Markdown
Train the model. Normally you would specify the real number of steps per epoch, but for demonstration purposes only run 3 steps.
###Code
steps_per_epoch=tf.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
###Output
_____no_output_____
###Markdown
Performance Note: This section just shows a couple of easy tricks that may help performance. For an in-depth guide see [Input Pipeline Performance](https://www.tensorflow.org/r1/guide/performance/datasets). The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU but may not be sufficient for GPU training, and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets:
###Code
import time
def timeit(ds, batches=2*steps_per_epoch+1):
overall_start = time.time()
# Fetch a single batch to prime the pipeline (fill the shuffle buffer),
# before starting the timer
it = iter(ds.take(batches+1))
next(it)
start = time.time()
for i,(images,labels) in enumerate(it):
if i%10 == 0:
print('.',end='')
print()
end = time.time()
duration = end-start
print("{} batches: {} s".format(batches, duration))
print("{:0.5f} Images/s".format(BATCH_SIZE*batches/duration))
print("Total time: {}s".format(end-overall_start))
###Output
_____no_output_____
###Markdown
The performance of the current dataset is:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Cache Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is especially performant if the data fits in memory. Here the images are cached after being pre-processed (decoded and resized):
###Code
ds = image_label_ds.cache()
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
One disadvantage of using an in-memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
If the data doesn't fit in memory, use a cache file:
###Code
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
TFRecord File Raw image data TFRecord files are a simple format for storing a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS. First, build a TFRecord file from the raw image data:
###Code
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
###Output
_____no_output_____
###Markdown
Next build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier.
###Code
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
###Output
_____no_output_____
###Markdown
Zip that with the labels dataset you defined earlier, to get the expected `(image,label)` pairs.
###Code
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
This is slower than the `cache` version because you have not cached the preprocessing. Serialized Tensors To bake the preprocessing into the TFRecord file, first make a dataset of the processed images, as before:
###Code
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
###Output
_____no_output_____
###Markdown
Now instead of a dataset of `.jpeg` strings, this is a dataset of tensors. To serialize this to a TFRecord file, you first convert the dataset of tensors to a dataset of strings.
###Code
ds = image_ds.map(tf.serialize_tensor)
ds
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
###Output
_____no_output_____
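###Markdown
A small sketch — checking the file size on disk shows what storing raw `float32` tensors instead of compressed JPEG bytes costs:
###Code
import os

# Roughly 192*192*3 float32 values per image, plus record overhead.
print(os.path.getsize('images.tfrec') / 2**20, 'MiB')
###Output
_____no_output_____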
###Markdown
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently. Just remember to de-serialize the tensor before trying to use it.
###Code
ds = tf.data.TFRecordDataset('images.tfrec')
def parse(x):
result = tf.parse_tensor(x, out_type=tf.float32)
result = tf.reshape(result, [192, 192, 3])
return result
ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
Now, add the labels and apply the same standard operations as before:
###Code
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Load images with tf.data > Note: This is an archived TF1 notebook. These are configured to run in TF2's [compatibility mode](https://www.tensorflow.org/guide/migrate) but will run in TF1 as well. To use TF1 in Colab, use the [%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb) magic. This tutorial provides a simple example of how to load an image dataset using `tf.data`. The dataset used in this example is distributed as directories of images, with one class of image per directory. Setup
###Code
import tensorflow.compat.v1 as tf
tf.__version__
AUTOTUNE = tf.data.experimental.AUTOTUNE
###Output
_____no_output_____
###Markdown
Download and inspect the dataset Retrieve the images Before you start any training, you'll need a set of images to teach the network about the new classes you want to recognize. We've created an archive of creative-commons licensed flower photos to use initially.
###Code
import pathlib
data_root_orig = tf.keras.utils.get_file('flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
###Output
_____no_output_____
###Markdown
After downloading 218MB, you should now have a copy of the flower photos available:
###Code
for item in data_root.iterdir():
print(item)
import random
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
all_image_paths[:10]
###Output
_____no_output_____
###Markdown
Inspect the images Now let's have a quick look at a couple of the images, so you know what you're dealing with:
###Code
import os
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
import IPython.display as display
def caption_image(image_path):
image_rel = pathlib.Path(image_path).relative_to(data_root)
return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1])
for n in range(3):
image_path = random.choice(all_image_paths)
display.display(display.Image(image_path))
print(caption_image(image_path))
print()
###Output
_____no_output_____
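###Markdown
To see what the attribution table built above looks like — a minimal sketch — print a single parsed entry:
###Code
# Maps a relative image path to the text after its ' CC-BY' marker.
example_key = next(iter(attributions))
print(example_key, '->', attributions[example_key])
###Output
_____no_output_____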
###Markdown
Determine the label for each image List the available labels:
###Code
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
###Output
_____no_output_____
###Markdown
Assign an index to each label:
###Code
label_to_index = dict((name, index) for index,name in enumerate(label_names))
label_to_index
###Output
_____no_output_____
###Markdown
Create a list of every file, and its label index
###Code
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
###Output
_____no_output_____
###Markdown
Load and format the images TensorFlow includes all the tools you need to load and process images:
###Code
img_path = all_image_paths[0]
img_path
###Output
_____no_output_____
###Markdown
Here is the raw data:
###Code
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
###Output
_____no_output_____
###Markdown
Decode it into an image tensor:
###Code
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
###Output
_____no_output_____
###Markdown
Resize it for your model:
###Code
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
###Output
_____no_output_____
###Markdown
Wrap these up in simple functions for later.
###Code
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [192, 192])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
import matplotlib.pyplot as plt
img_path = all_image_paths[0]
label = all_image_labels[0]
plt.imshow(load_and_preprocess_image(img_path))
plt.grid(False)
plt.xlabel(caption_image(img_path).encode('utf-8'))
plt.title(label_names[label].title())
print()
###Output
_____no_output_____
###Markdown
Build a `tf.data.Dataset` A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method. Slicing the array of strings results in a dataset of strings:
###Code
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
###Output
_____no_output_____
###Markdown
The `output_shapes` and `output_types` fields describe the content of each item in the dataset. In this case it is a set of scalar binary strings.
###Code
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds)
###Output
_____no_output_____
###Markdown
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
###Code
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n,image in enumerate(image_ds.take(4)):
plt.subplot(2,2,n+1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
###Output
_____no_output_____
###Markdown
A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels
###Code
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
for label in label_ds.take(10):
print(label_names[label.numpy()])
###Output
_____no_output_____
###Markdown
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs.
###Code
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
###Output
_____no_output_____
###Markdown
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
###Code
print(image_label_ds)
###Output
_____no_output_____
###Markdown
Note: When you have arrays like `all_image_labels` and `all_image_paths`, an alternative to using `tf.data.Dataset.zip` is slicing the pair of arrays.
###Code
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
return load_and_preprocess_image(path), label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
###Output
_____no_output_____
###Markdown
Basic methods for training To train a model with this dataset you will want the data:* To be well shuffled.* To be batched.* To repeat forever.* To have batches available as soon as possible. These features can be easily added using the `tf.data` API.
###Code
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
There are a few things to note here:1. The order is important. * A `.shuffle` *after* a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all). * A `.shuffle` *after* a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.1. Use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.This last point, as well as the order of `.shuffle` and `.repeat`, can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
* For more on ordering the operations, see [Repeat and Shuffle](https://www.tensorflow.org/r1/guide/performance/datasets#repeat_and_shuffle) in the Input Pipeline Performance guide. Pipe the dataset to a model Fetch a copy of MobileNet v2 from `tf.keras.applications`. This will be used for a simple transfer learning example. Set the MobileNet weights to be non-trainable:
###Code
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
###Output
_____no_output_____
###Markdown
This model expects its input to be normalized to the `[-1,1]` range: ```help(keras_applications.mobilenet_v2.preprocess_input)``` ...This function applies the "Inception" preprocessing which converts the RGB values from [0, 255] to [-1, 1]... So before passing data to the MobileNet model, you need to convert the input from a range of `[0,1]` to `[-1,1]`.
###Code
def change_range(image,label):
return 2*image-1, label
keras_ds = ds.map(change_range)
###Output
_____no_output_____
###Markdown
The MobileNet returns a `6x6` spatial grid of features for each image. Pass it a batch of images to see:
###Code
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
###Output
_____no_output_____
###Markdown
Because of this output shape, build a model wrapped around MobileNet using `tf.keras.layers.GlobalAveragePooling2D` to average over the spatial dimensions before the output `tf.keras.layers.Dense` layer:
###Code
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(label_names), activation = 'softmax')])
###Output
_____no_output_____
###Markdown
Now it produces outputs of the expected shape:
###Code
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
###Output
_____no_output_____
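###Markdown
Note that, despite the variable name, these values are not raw logits: the `Dense` layer above uses a `softmax` activation, so each row is already a probability distribution. A quick sanity check, reusing `logit_batch` from the previous cell:

```python
# Each row should sum to ~1.0 because of the softmax activation.
print(logit_batch.sum(axis=-1))
```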
###Markdown
Compile the model to describe the training procedure:
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
There are 2 trainable variables: the Dense `weights` and `bias`:
###Code
len(model.trainable_variables)
model.summary()
###Output
_____no_output_____
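###Markdown
As a worked check on the summary (a sketch; the 1280 comes from MobileNetV2's final feature depth, and `label_names` holds the 5 flower classes):

```python
# Dense kernel: 1280 features x num_classes, plus one bias per class.
print(1280 * len(label_names) + len(label_names))  # 6405 trainable parameters
```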
###Markdown
Train the model. Normally you would specify the real number of steps per epoch, but for demonstration purposes only run 3 steps.
###Code
steps_per_epoch=tf.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
###Output
_____no_output_____
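###Markdown
For a real run you would pass the full count computed above instead of 3; a hypothetical call with the same `model` and `ds`:

```python
# Sketch of a full training run; steps_per_epoch was computed with tf.ceil above.
model.fit(ds, epochs=5, steps_per_epoch=int(steps_per_epoch))
```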
###Markdown
Performance

Note: This section just shows a couple of easy tricks that may help performance. For an in-depth guide see [Input Pipeline Performance](https://www.tensorflow.org/r1/guide/performance/datasets).

The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU but may not be sufficient for GPU training, and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets:
###Code
import time
def timeit(ds, batches=int(2*steps_per_epoch+1)):  # cast to int: steps_per_epoch is a float from tf.ceil
overall_start = time.time()
# Fetch a single batch to prime the pipeline (fill the shuffle buffer),
# before starting the timer
it = iter(ds.take(batches+1))
next(it)
start = time.time()
for i,(images,labels) in enumerate(it):
if i%10 == 0:
print('.',end='')
print()
end = time.time()
duration = end-start
print("{} batches: {} s".format(batches, duration))
print("{:0.5f} Images/s".format(BATCH_SIZE*batches/duration))
print("Total time: {}s".format(end-overall_start))
###Output
_____no_output_____
###Markdown
The performance of the current dataset is:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Cache

Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is especially performant if the data fits in memory. Here the images are cached after being pre-processed (decoded and resized):
###Code
ds = image_label_ds.cache()
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
One disadvantage to using an in-memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
If the data doesn't fit in memory, use a cache file:
###Code
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
timeit(ds)
###Output
_____no_output_____
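###Markdown
One caveat worth noting (an assumption about your workflow, not something this notebook runs): the cache file stores the *pre-processed* tensors, so if you later change `preprocess_image` you should delete the stale cache shards before re-running, or the old data will keep being served:

```python
import glob
import os

# Hypothetical cleanup step: remove the cache shards written next to the notebook.
for f in glob.glob('./cache.tf-data*'):
    os.remove(f)
```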
###Markdown
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
TFRecord File

Raw image data

TFRecord files are a simple format for storing a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS.

First, build a TFRecord file from the raw image data:
###Code
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
###Output
_____no_output_____
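###Markdown
To double-check the write succeeded, you can count the records back out of the file; a minimal sketch (assumes eager execution, as used throughout this notebook):

```python
n_records = sum(1 for _ in tf.data.TFRecordDataset('images.tfrec'))
print(n_records == image_count)  # should be True: one record per image
```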
###Markdown
Next build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier.
###Code
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
###Output
_____no_output_____
###Markdown
Zip that with the labels dataset you defined earlier, to get the expected `(image,label)` pairs.
###Code
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
This is slower than the `cache` version because you have not cached the preprocessing.

Serialized Tensors

To save some preprocessing to the TFRecord file, first make a dataset of the processed images, as before:
###Code
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
###Output
_____no_output_____
###Markdown
Now instead of a dataset of `.jpeg` strings, this is a dataset of tensors. To serialize this to a TFRecord file, you first convert the dataset of tensors to a dataset of strings.
###Code
ds = image_ds.map(tf.serialize_tensor)
ds
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
###Output
_____no_output_____
###Markdown
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently. Just remember to de-serialize the tensor before trying to use it.
###Code
ds = tf.data.TFRecordDataset('images.tfrec')
def parse(x):
  result = tf.parse_tensor(x, out_type=tf.float32)
  # parse_tensor does not restore the static shape, so set it explicitly.
  result = tf.reshape(result, [192, 192, 3])
  return result
ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
Now, add the labels and apply the same standard operations as before:
###Code
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Load images with tf.data Run in Google Colab View source on GitHub > Note: This is an archived TF1 notebook. These are configuredto run in TF2's [compatbility mode](https://www.tensorflow.org/guide/migrate)but will run in TF1 as well. To use TF1 in Colab, use the[%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb)magic. This tutorial provides a simple example of how to load an image dataset using `tf.data`.The dataset used in this example is distributed as directories of images, with one class of image per directory. Setup
###Code
import tensorflow.compat.v1 as tf
tf.__version__
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
###Markdown
Download and inspect the dataset Retrieve the imagesBefore you start any training, you'll need a set of images to teach the network about the new classes you want to recognize. You've created an archive of creative-commons licensed flower photos to use initially.
###Code
import pathlib
data_root_orig = tf.keras.utils.get_file('flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
###Output
_____no_output_____
###Markdown
After downloading 218MB, you should now have a copy of the flower photos available:
###Code
for item in data_root.iterdir():
print(item)
import random
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
all_image_paths[:10]
###Output
_____no_output_____
###Markdown
Inspect the imagesNow let's have a quick look at a couple of the images, so you know what you're dealing with:
###Code
import os
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
import IPython.display as display
def caption_image(image_path):
image_rel = pathlib.Path(image_path).relative_to(data_root)
return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1])
for n in range(3):
image_path = random.choice(all_image_paths)
display.display(display.Image(image_path))
print(caption_image(image_path))
print()
###Output
_____no_output_____
###Markdown
Determine the label for each image List the available labels:
###Code
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
###Output
_____no_output_____
###Markdown
Assign an index to each label:
###Code
label_to_index = dict((name, index) for index,name in enumerate(label_names))
label_to_index
###Output
_____no_output_____
###Markdown
Create a list of every file, and its label index
###Code
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
###Output
_____no_output_____
###Markdown
Load and format the images TensorFlow includes all the tools you need to load and process images:
###Code
img_path = all_image_paths[0]
img_path
###Output
_____no_output_____
###Markdown
here is the raw data:
###Code
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
###Output
_____no_output_____
###Markdown
Decode it into an image tensor:
###Code
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
###Output
_____no_output_____
###Markdown
Resize it for your model:
###Code
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
###Output
_____no_output_____
###Markdown
Wrap up these up in simple functions for later.
###Code
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [192, 192])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
import matplotlib.pyplot as plt
img_path = all_image_paths[0]
label = all_image_labels[0]
plt.imshow(load_and_preprocess_image(img_path))
plt.grid(False)
plt.xlabel(caption_image(img_path).encode('utf-8'))
plt.title(label_names[label].title())
print()
###Output
_____no_output_____
###Markdown
Build a `tf.data.Dataset` A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method.Slicing the array of strings results in a dataset of strings:
###Code
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
###Output
_____no_output_____
###Markdown
The `output_shapes` and `output_types` fields describe the content of each item in the dataset. In this case it is a set of scalar binary-strings
###Code
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds)
###Output
_____no_output_____
###Markdown
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
###Code
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n,image in enumerate(image_ds.take(4)):
plt.subplot(2,2,n+1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
###Output
_____no_output_____
###Markdown
A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels
###Code
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
for label in label_ds.take(10):
print(label_names[label.numpy()])
###Output
_____no_output_____
###Markdown
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs.
###Code
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
###Output
_____no_output_____
###Markdown
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
###Code
print(image_label_ds)
###Output
_____no_output_____
###Markdown
Note: When you have arrays like `all_image_labels` and `all_image_paths`, an alternative to using `tf.data.dataset.Dataset.zip` is slicing the pair of arrays.
###Code
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
return load_and_preprocess_image(path), label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
###Output
_____no_output_____
###Markdown
Basic methods for training To train a model with this dataset you will want the data:* To be well shuffled.* To be batched.* To repeat forever.* To have batches available as soon as possible.These features can be easily added using the `tf.data` api.
###Code
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches, in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
There are a few things to note here:1. The order is important. * A `.shuffle` *after* a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all). * A `.shuffle` *after* a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.1. Use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.This last point, as well as the order of `.shuffle` and `.repeat`, can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
* For more on ordering the operations, see [Repeat and Shuffle](https://www.tensorflow.org/r1/guide/performance/datasetsrepeat_and_shuffle) in the Input Pipeline Performance guide. Pipe the dataset to a modelFetch a copy of MobileNet v2 from `tf.keras.applications`.This will be used for a simple transfer learning example.Set the MobileNet weights to be non-trainable:
###Code
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
###Output
_____no_output_____
###Markdown
This model expects its input to be normalized to the `[-1,1]` range:```help(keras_applications.mobilenet_v2.preprocess_input)```...This function applies the "Inception" preprocessing which convertsthe RGB values from [0, 255] to [-1, 1]... So before passing data to the MobileNet model, you need to convert the input from a range of `[0,1]` to `[-1,1]`.
###Code
def change_range(image,label):
return 2*image-1, label
keras_ds = ds.map(change_range)
###Output
_____no_output_____
###Markdown
The MobileNet returns a `6x6` spatial grid of features for each image.Pass it a batch of images to see:
###Code
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
###Output
_____no_output_____
###Markdown
Because of this output shape, build a model wrapped around MobileNet using `tf.keras.layers.GlobalAveragePooling2D` to average over the space dimensions before the output `tf.keras.layers.Dense` layer:
###Code
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(label_names), activation = 'softmax')])
###Output
_____no_output_____
###Markdown
Now it produces outputs of the expected shape:
###Code
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
###Output
_____no_output_____
###Markdown
Compile the model to describe the training procedure:
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
There are 2 trainable variables: the Dense `weights` and `bias`:
###Code
len(model.trainable_variables)
model.summary()
###Output
_____no_output_____
###Markdown
Train the model.Normally you would specify the real number of steps per epoch, but for demonstration purposes only run 3 steps.
###Code
steps_per_epoch=tf.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
###Output
_____no_output_____
###Markdown
PerformanceNote: This section just shows a couple of easy tricks that may help performance. For an in depth guide see [Input Pipeline Performance](https://www.tensorflow.org/r1/guide/performance/datasets).The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU but may not be sufficient for GPU training, and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets:
###Code
import time
def timeit(ds, batches=2*steps_per_epoch+1):
overall_start = time.time()
# Fetch a single batch to prime the pipeline (fill the shuffle buffer),
# before starting the timer
it = iter(ds.take(batches+1))
next(it)
start = time.time()
for i,(images,labels) in enumerate(it):
if i%10 == 0:
print('.',end='')
print()
end = time.time()
duration = end-start
print("{} batches: {} s".format(batches, duration))
print("{:0.5f} Images/s".format(BATCH_SIZE*batches/duration))
print("Total time: {}s".format(end-overall_start))
###Output
_____no_output_____
###Markdown
The performance of the current dataset is:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Cache Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is especially performant if the data fits in memory.Here the images are cached, after being pre-precessed (decoded and resized):
###Code
ds = image_label_ds.cache()
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
One disadvantage to using an in-memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
If the data doesn't fit in memory, use a cache file:
###Code
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
TFRecord File Raw image dataTFRecord files are a simple format for storing a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS.First, build a TFRecord file from the raw image data:
###Code
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
###Output
_____no_output_____
###Markdown
Next build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier.
###Code
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
###Output
_____no_output_____
###Markdown
Zip that with the labels dataset you defined earlier, to get the expected `(image,label)` pairs.
###Code
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
This is slower than the `cache` version because you have not cached the preprocessing. Serialized Tensors. To save some preprocessing to the TFRecord file, first make a dataset of the processed images, as before:
###Code
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
###Output
_____no_output_____
###Markdown
Now instead of a dataset of `.jpeg` strings, this is a dataset of tensors. To serialize this to a TFRecord file you first convert the dataset of tensors to a dataset of strings.
###Code
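# Each element becomes a scalar string tensor (the serialized image bytes),
# which the TFRecordWriter below can store as one record.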
ds = image_ds.map(tf.serialize_tensor)
ds
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
###Output
_____no_output_____
###Markdown
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently. Just remember to de-serialize the tensor before trying to use it.
###Code
ds = tf.data.TFRecordDataset('images.tfrec')
def parse(x):
  # tf.serialize_tensor discards static shape information, so parse the
  # string back to floats and restore the shape explicitly.
  result = tf.parse_tensor(x, out_type=tf.float32)
  result = tf.reshape(result, [192, 192, 3])
  return result
ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
Now, add the labels and apply the same standard operations as before:
###Code
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Load images with tf.data. > Note: This is an archived TF1 notebook. It is configured to run in TF2's [compatibility mode](https://www.tensorflow.org/guide/migrate) but will run in TF1 as well. To use TF1 in Colab, use the [%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb) magic. This tutorial provides a simple example of how to load an image dataset using `tf.data`. The dataset used in this example is distributed as directories of images, with one class of image per directory. Setup
###Code
import tensorflow.compat.v1 as tf
tf.__version__
AUTOTUNE = tf.data.AUTOTUNE
###Output
_____no_output_____
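###Markdown
The note above mentions the `%tensorflow_version 1.x` magic. As a minimal sketch, in Colab it would go in its own cell before any TensorFlow import; it is a Colab-only magic and is shown commented out here:
###Code
# %tensorflow_version 1.x
###Output
_____no_output_____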
###Markdown
Download and inspect the dataset. Retrieve the images: Before you start any training, you'll need a set of images to teach the network about the new classes you want to recognize. An archive of creative-commons licensed flower photos is available to use initially.
###Code
import pathlib
data_root_orig = tf.keras.utils.get_file('flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data_root = pathlib.Path(data_root_orig)
print(data_root)
###Output
_____no_output_____
###Markdown
After downloading 218MB, you should now have a copy of the flower photos available:
###Code
for item in data_root.iterdir():
  print(item)
import random
all_image_paths = list(data_root.glob('*/*'))
all_image_paths = [str(path) for path in all_image_paths]
random.shuffle(all_image_paths)
image_count = len(all_image_paths)
image_count
all_image_paths[:10]
###Output
_____no_output_____
###Markdown
Inspect the images. Now let's have a quick look at a couple of the images, so you know what you're dealing with:
###Code
import os
attributions = (data_root/"LICENSE.txt").open(encoding='utf-8').readlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)
import IPython.display as display
def caption_image(image_path):
  image_rel = pathlib.Path(image_path).relative_to(data_root)
  return "Image (CC BY 2.0) " + ' - '.join(attributions[str(image_rel)].split(' - ')[:-1])
for n in range(3):
  image_path = random.choice(all_image_paths)
  display.display(display.Image(image_path))
  print(caption_image(image_path))
  print()
###Output
_____no_output_____
###Markdown
Determine the label for each image. List the available labels:
###Code
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())
label_names
###Output
_____no_output_____
###Markdown
Assign an index to each label:
###Code
label_to_index = dict((name, index) for index,name in enumerate(label_names))
label_to_index
###Output
_____no_output_____
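###Markdown
If you later need to map a predicted index back to its name, the inverse mapping is a one-liner (`index_to_label` is a hypothetical helper, not used elsewhere in this tutorial):
###Code
index_to_label = dict(enumerate(label_names))  # e.g. {0: 'daisy', 1: 'dandelion', ...}
index_to_label
###Output
_____no_output_____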
###Markdown
Create a list of every file and its label index:
###Code
all_image_labels = [label_to_index[pathlib.Path(path).parent.name]
for path in all_image_paths]
print("First 10 labels indices: ", all_image_labels[:10])
###Output
_____no_output_____
###Markdown
Load and format the images TensorFlow includes all the tools you need to load and process images:
###Code
img_path = all_image_paths[0]
img_path
###Output
_____no_output_____
###Markdown
Here is the raw data:
###Code
img_raw = tf.io.read_file(img_path)
print(repr(img_raw)[:100]+"...")
###Output
_____no_output_____
###Markdown
Decode it into an image tensor:
###Code
img_tensor = tf.image.decode_image(img_raw)
print(img_tensor.shape)
print(img_tensor.dtype)
###Output
_____no_output_____
###Markdown
Resize it for your model:
###Code
img_final = tf.image.resize(img_tensor, [192, 192])
img_final = img_final/255.0
print(img_final.shape)
print(img_final.numpy().min())
print(img_final.numpy().max())
###Output
_____no_output_____
###Markdown
Wrap these up in simple functions for later.
###Code
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [192, 192])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
image = tf.read_file(path)
return preprocess_image(image)
import matplotlib.pyplot as plt
img_path = all_image_paths[0]
label = all_image_labels[0]
plt.imshow(load_and_preprocess_image(img_path))
plt.grid(False)
plt.xlabel(caption_image(img_path).encode('utf-8'))
plt.title(label_names[label].title())
print()
###Output
_____no_output_____
###Markdown
Build a `tf.data.Dataset` A dataset of images The easiest way to build a `tf.data.Dataset` is using the `from_tensor_slices` method.Slicing the array of strings results in a dataset of strings:
###Code
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
###Output
_____no_output_____
###Markdown
The `output_shapes` and `output_types` fields describe the content of each item in the dataset. In this case it is a set of scalar binary-strings.
###Code
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print(path_ds)
###Output
_____no_output_____
###Markdown
Now create a new dataset that loads and formats images on the fly by mapping `preprocess_image` over the dataset of paths.
###Code
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
for n,image in enumerate(image_ds.take(4)):
plt.subplot(2,2,n+1)
plt.imshow(image)
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.xlabel(caption_image(all_image_paths[n]))
plt.show()
###Output
_____no_output_____
###Markdown
A dataset of `(image, label)` pairs Using the same `from_tensor_slices` method you can build a dataset of labels
###Code
label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(all_image_labels, tf.int64))
for label in label_ds.take(10):
print(label_names[label.numpy()])
###Output
_____no_output_____
###Markdown
Since the datasets are in the same order you can just zip them together to get a dataset of `(image, label)` pairs.
###Code
image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
###Output
_____no_output_____
###Markdown
The new dataset's `shapes` and `types` are tuples of shapes and types as well, describing each field:
###Code
print(image_label_ds)
###Output
_____no_output_____
###Markdown
Note: When you have arrays like `all_image_labels` and `all_image_paths`, an alternative to using `tf.data.Dataset.zip` is slicing the pair of arrays.
###Code
ds = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
# The tuples are unpacked into the positional arguments of the mapped function
def load_and_preprocess_from_path_label(path, label):
return load_and_preprocess_image(path), label
image_label_ds = ds.map(load_and_preprocess_from_path_label)
image_label_ds
###Output
_____no_output_____
###Markdown
Basic methods for training To train a model with this dataset you will want the data:* To be well shuffled.* To be batched.* To repeat forever.* To have batches available as soon as possible.These features can be easily added using the `tf.data` API.
###Code
BATCH_SIZE = 32
# Setting a shuffle buffer size as large as the dataset ensures that the data is
# completely shuffled.
ds = image_label_ds.shuffle(buffer_size=image_count)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches, in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
There are a few things to note here:1. The order is important. * A `.shuffle` *after* a `.repeat` would shuffle items across epoch boundaries (some items will be seen twice before others are seen at all). * A `.shuffle` *after* a `.batch` would shuffle the order of the batches, but not shuffle the items across batches.1. Use a `buffer_size` the same size as the dataset for a full shuffle. Up to the dataset size, large values provide better randomization, but use more memory.1. The shuffle buffer is filled before any elements are pulled from it. So a large `buffer_size` may cause a delay when your `Dataset` is starting.1. The shuffled dataset doesn't report the end of a dataset until the shuffle-buffer is completely empty. The `Dataset` is restarted by `.repeat`, causing another wait for the shuffle-buffer to be filled.This last point, as well as the order of `.shuffle` and `.repeat`, can be addressed by using the `tf.data.Dataset.apply` method with the fused `tf.data.experimental.shuffle_and_repeat` function:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)
ds
###Output
_____no_output_____
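###Markdown
For contrast, a small illustrative sketch of the two problematic orderings described in the notes above (`ds_small` and the other names here are hypothetical, not part of the original tutorial):
###Code
ds_small = tf.data.Dataset.from_tensor_slices(list(range(10)))
# shuffle *after* batch: whole batches move around, but items inside each batch stay put
batch_then_shuffle = ds_small.batch(4).shuffle(buffer_size=3)
# shuffle *after* repeat: items can leak across epoch boundaries
repeat_then_shuffle = ds_small.repeat().shuffle(buffer_size=10)
###Output
_____no_output_____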
###Markdown
* For more on ordering the operations, see [Repeat and Shuffle](https://www.tensorflow.org/r1/guide/performance/datasets#repeat_and_shuffle) in the Input Pipeline Performance guide. Pipe the dataset to a model Fetch a copy of MobileNet v2 from `tf.keras.applications`. This will be used for a simple transfer learning example. Set the MobileNet weights to be non-trainable:
###Code
mobile_net = tf.keras.applications.MobileNetV2(input_shape=(192, 192, 3), include_top=False)
mobile_net.trainable=False
###Output
_____no_output_____
###Markdown
This model expects its input to be normalized to the `[-1,1]` range: ```help(keras_applications.mobilenet_v2.preprocess_input)``` ...This function applies the "Inception" preprocessing which converts the RGB values from [0, 255] to [-1, 1]... So before passing data to the MobileNet model, you need to convert the input from a range of `[0,1]` to `[-1,1]`.
###Code
def change_range(image,label):
return 2*image-1, label
keras_ds = ds.map(change_range)
###Output
_____no_output_____
###Markdown
The MobileNet returns a `6x6` spatial grid of features for each image. Pass it a batch of images to see:
###Code
# The dataset may take a few seconds to start, as it fills its shuffle buffer.
image_batch, label_batch = next(iter(keras_ds))
feature_map_batch = mobile_net(image_batch)
print(feature_map_batch.shape)
###Output
_____no_output_____
###Markdown
Because of this output shape, build a model wrapped around MobileNet using `tf.keras.layers.GlobalAveragePooling2D` to average over the space dimensions before the output `tf.keras.layers.Dense` layer:
###Code
model = tf.keras.Sequential([
mobile_net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(len(label_names), activation = 'softmax')])
###Output
_____no_output_____
###Markdown
Now it produces outputs of the expected shape:
###Code
logit_batch = model(image_batch).numpy()
print("min logit:", logit_batch.min())
print("max logit:", logit_batch.max())
print()
print("Shape:", logit_batch.shape)
###Output
_____no_output_____
###Markdown
Compile the model to describe the training procedure:
###Code
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
There are 2 trainable variables: the Dense `weights` and `bias`:
###Code
len(model.trainable_variables)
model.summary()
###Output
_____no_output_____
###Markdown
Train the model. Normally you would specify the real number of steps per epoch, but for demonstration purposes only run 3 steps.
###Code
steps_per_epoch=tf.ceil(len(all_image_paths)/BATCH_SIZE).numpy()
steps_per_epoch
model.fit(ds, epochs=1, steps_per_epoch=3)
###Output
_____no_output_____
###Markdown
Performance Note: This section just shows a couple of easy tricks that may help performance. For an in-depth guide see [Input Pipeline Performance](https://www.tensorflow.org/r1/guide/performance/datasets). The simple pipeline used above reads each file individually, on each epoch. This is fine for local training on CPU but may not be sufficient for GPU training, and is totally inappropriate for any sort of distributed training. To investigate, first build a simple function to check the performance of our datasets:
###Code
import time
def timeit(ds, batches=2*steps_per_epoch+1):
overall_start = time.time()
# Fetch a single batch to prime the pipeline (fill the shuffle buffer),
# before starting the timer
it = iter(ds.take(batches+1))
next(it)
start = time.time()
for i,(images,labels) in enumerate(it):
if i%10 == 0:
print('.',end='')
print()
end = time.time()
duration = end-start
print("{} batches: {} s".format(batches, duration))
print("{:0.5f} Images/s".format(BATCH_SIZE*batches/duration))
print("Total time: {}s".format(end-overall_start))
###Output
_____no_output_____
###Markdown
The performance of the current dataset is:
###Code
ds = image_label_ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
Cache Use `tf.data.Dataset.cache` to easily cache calculations across epochs. This is especially performant if the data fits in memory. Here the images are cached, after being preprocessed (decoded and resized):
###Code
ds = image_label_ds.cache()
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
One disadvantage to using an in-memory cache is that the cache must be rebuilt on each run, giving the same startup delay each time the dataset is started:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
If the data doesn't fit in memory, use a cache file:
###Code
ds = image_label_ds.cache(filename='./cache.tf-data')
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds = ds.batch(BATCH_SIZE).prefetch(1)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
The cache file also has the advantage that it can be used to quickly restart the dataset without rebuilding the cache. Note how much faster it is the second time:
###Code
timeit(ds)
###Output
_____no_output_____
###Markdown
TFRecord File Raw image data TFRecord files are a simple format for storing a sequence of binary blobs. By packing multiple examples into the same file, TensorFlow is able to read multiple examples at once, which is especially important for performance when using a remote storage service such as GCS. First, build a TFRecord file from the raw image data:
###Code
image_ds = tf.data.Dataset.from_tensor_slices(all_image_paths).map(tf.read_file)
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(image_ds)
###Output
_____no_output_____
###Markdown
Next build a dataset that reads from the TFRecord file and decodes/reformats the images using the `preprocess_image` function you defined earlier.
###Code
image_ds = tf.data.TFRecordDataset('images.tfrec').map(preprocess_image)
###Output
_____no_output_____
###Markdown
Zip that with the labels dataset you defined earlier, to get the expected `(image,label)` pairs.
###Code
ds = tf.data.Dataset.zip((image_ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____
###Markdown
This is slower than the `cache` version because you have not cached the preprocessing. Serialized Tensors To save some preprocessing to the TFRecord file, first make a dataset of the processed images, as before:
###Code
paths_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
image_ds = paths_ds.map(load_and_preprocess_image)
image_ds
###Output
_____no_output_____
###Markdown
Now instead of a dataset of `.jpeg` strings, this is a dataset of tensors. To serialize this to a TFRecord file you first convert the dataset of tensors to a dataset of strings.
###Code
ds = image_ds.map(tf.serialize_tensor)
ds
tfrec = tf.data.experimental.TFRecordWriter('images.tfrec')
tfrec.write(ds)
###Output
_____no_output_____
###Markdown
With the preprocessing cached, data can be loaded from the TFRecord file quite efficiently. Just remember to de-serialize the tensor before trying to use it.
###Code
ds = tf.data.TFRecordDataset('images.tfrec')
def parse(x):
result = tf.parse_tensor(x, out_type=tf.float32)
result = tf.reshape(result, [192, 192, 3])
return result
ds = ds.map(parse, num_parallel_calls=AUTOTUNE)
ds
###Output
_____no_output_____
###Markdown
Now, add the labels and apply the same standard operations as before:
###Code
ds = tf.data.Dataset.zip((ds, label_ds))
ds = ds.apply(
tf.data.experimental.shuffle_and_repeat(buffer_size=image_count))
ds=ds.batch(BATCH_SIZE).prefetch(AUTOTUNE)
ds
timeit(ds)
###Output
_____no_output_____ |
tarea5.ipynb | ###Markdown
###Code
from time import time
def ejemplo1( n ):
start_time = time()
c = n + 1
d = c * n
e = n * n
total = c + e - d
print(f"total={ total }")
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
for entrada in range(100,1100,100):
ejemplo1( entrada )
from time import time
def ejemplo2( n ):
start_time = time()
contador = 0
for i in range( n ) :
for j in range( n ) :
contador += 1
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return contador
for entrada in range(100,1100,100):
ejemplo2(entrada)
from time import time
def ejemplo3( n ): # n=4
start_time = time()
x = n * 2 # x = 8
y = 0 # y = 0
for m in range( 100 ): # fixed 100 iterations, independent of n
y = x - n # y = 4
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return y
for entrada in range(100,1100,100):
ejemplo3( entrada )
from time import time
def ejemplo4( n ):
start_time = time()
x = 3 * 3.1416 + n
y = x + 3 * 3 - n
z = x + y
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return z
for entrada in range(100,1100,100):
ejemplo4( entrada )
from time import time
def ejemplo5( x ):
start_time=time()
n = 10
for j in range( 0 , x , 1 ):
n = j + n
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return n
for entrada in range(100,1100,100):
ejemplo5( entrada )
from time import time
def ejemplo6( n ):
start_time = time()
data=[[[1 for x in range(n)] for x in range(n)]
for x in range(n)]
suma = 0
for d in range(n):
for r in range(n):
for c in range(n):
suma += data[d][r][c]
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return suma
for entrada in range(100,1100,100):
ejemplo6( entrada )
###Output
Elapsed time: 0.1499564648 seconds.
Elapsed time: 1.2060003281 seconds.
Elapsed time: 3.8068406582 seconds.
Elapsed time: 9.5664360523 seconds.
Elapsed time: 18.9416923523 seconds.
Elapsed time: 37.1017789841 seconds.
Elapsed time: 52.8487811089 seconds.
Elapsed time: 82.9493203163 seconds.
Elapsed time: 120.6990776062 seconds.
Elapsed time: 167.0003297329 seconds.
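###Markdown
As an aside, a quick illustrative sketch (this cell is an addition with hypothetical variable names) that estimates the empirical growth order $k$ in $t \approx c\,n^{k}$ from the timings printed above:
###Code
import numpy as np

n = np.arange(100, 1100, 100)
t = np.array([0.1499564648, 1.2060003281, 3.8068406582, 9.5664360523,
              18.9416923523, 37.1017789841, 52.8487811089, 82.9493203163,
              120.6990776062, 167.0003297329])
# the slope of log(t) versus log(n) estimates the exponent k
k, log_c = np.polyfit(np.log(n), np.log(t), 1)
print("empirical order ~ n^%.2f" % k)  # close to 3, matching the triple loop of ejemplo6
###Output
_____no_output_____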
###Markdown
###Code
def ejemplo1( n ):
start_time = time()
c = n + 1
d = c * n
e = n * n
total = c + e - d
print(f"total={ total }")
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
for entrada in range(100,1100,100):
ejemplo1(entrada)
def ejemplo2( n ):
start_time = time()
contador = 0
for i in range( n ) :
for j in range( n ) :
contador += 1
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return contador
for entrada in range(100,1100,100):
ejemplo2(entrada )
def ejemplo3( n ):
start_time = time()
x = n * 2
y = 0
for m in range(100):
y= x - n
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return y
for entrada in range(100,1100,100):
ejemplo3(entrada)
def ejemplo4( n ):
start_time = time()
x = 3 * 3.1416 + n
y = x + 3 * 3 - n
z = x + y
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return z
for entrada in range(100,1100,100):
ejemplo4(entrada)
def ejemplo5( x ):
start_time = time()
n = 10
for j in range( 0 , x , 1 ):
n = j + n
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return n
for entrada in range(100,1100,100):
ejemplo5(entrada)
def ejemplo6( n ):
start_time = time()
data=[[[1 for x in range(n)] for x in range(n)]
for x in range(n)]
suma = 0
for d in range(n):
for r in range(n):
for c in range(n):
suma += data[d][r][c]
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return suma
for entrada in range(100,1100,100):
ejemplo6(entrada)
###Output
Elapsed time: 0.1900041103 seconds.
Elapsed time: 1.0157248974 seconds.
Elapsed time: 3.6247894764 seconds.
Elapsed time: 9.4906835556 seconds.
Elapsed time: 17.7030539513 seconds.
Elapsed time: 32.4627053738 seconds.
Elapsed time: 51.8274359703 seconds.
Elapsed time: 77.4117968082 seconds.
Elapsed time: 111.3801238537 seconds.
Elapsed time: 158.6797108650 seconds.
###Markdown
###Code
from time import time
def ejemplo1( n ):
start_time = time()
c = n + 1
d = c * n
e = n * n
total = c + e - d
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
for entrada in range(100, 1100, 100):
ejemplo1(entrada)
from time import time
def ejemplo2( n ):
start_time = time()
contador = 0
for i in range( n ) :
for j in range( n ) :
contador += 1
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return contador
for entrada in range(100, 1100, 100):
ejemplo2(entrada)
from time import time
def ejemplo3( n ): # n=4
x = n * 2 # x = 8
start_time = time()
y = 0 # y = 0
for m in range( 100 ): # fixed 100 iterations, independent of n
y = x - n # y = 4
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return y
for entrada in range(100, 1100, 100):
ejemplo3(entrada)
from time import time
def ejemplo4( n ):
start_time = time()
x = 3 * 3.1416 + n
y = x + 3 * 3 - n
z = x + y
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return z
for entrada in range(100, 1100, 100):
ejemplo4(entrada)
from time import time
def ejemplo5( x ):
start_time = time()
n = 10
for j in range( 0 , x , 1 ):
n = j + n
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return n
for entrada in range(100, 1100, 100):
ejemplo5(entrada)
def ejemplo6( n ):
data=[[[1 for x in range(n)] for x in range(n)]
for x in range(n)]
suma = 0
for d in range(n):
for r in range(n):
for c in range(n):
suma += data[d][r][c]
return suma
print(ejemplo6( 5 ))
from time import time
def ejemplo6( n ):
start_time = time()
data=[[[1 for x in range(n)] for x in range(n)]
for x in range(n)]
suma = 0
for d in range(n):
for r in range(n):
for c in range(n):
suma += data[d][r][c]
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return suma
for entrada in range(100, 1100, 100):
ejemplo6(entrada)
###Output
Elapsed time: 0.1589720249 seconds.
Elapsed time: 1.0823802948 seconds.
Elapsed time: 6.5370461941 seconds.
Elapsed time: 11.2187902927 seconds.
Elapsed time: 19.3148763180 seconds.
Elapsed time: 36.2974433899 seconds.
Elapsed time: 54.0428769588 seconds.
Elapsed time: 84.5467221737 seconds.
Elapsed time: 116.0469753742 seconds.
Elapsed time: 162.7701656818 seconds.
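###Markdown
A closing illustrative note (the helper below is hypothetical, not part of the original homework): a single `time()` measurement is noisy, while Python's `timeit` module repeats the call and averages, giving steadier numbers:
###Code
import timeit

def conteo(n):  # same double loop as ejemplo2
    contador = 0
    for i in range(n):
        for j in range(n):
            contador += 1
    return contador

# average over 5 runs at n = 300
promedio = timeit.timeit(lambda: conteo(300), number=5) / 5
print("Elapsed time: %0.6f seconds." % promedio)
###Output
_____no_output_____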
|
docs/example_notebooks/utilities/Simple MSMS plotter.ipynb | ###Markdown
1. Import Python Packages
###Code
%matplotlib notebook
import sys
sys.path.insert(1,'/global/project/projectdirs/metatlas/anaconda/lib/python2.7/site-packages' )
from metatlas.helpers import metatlas_get_data_helper_fun as ma_data
import metatlas.metatlas_objects as metob
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
my_file = metob.retrieve('Lcmsrun',name='20150910%_WT_%Run61%',username='*')[-1]
df_container = ma_data.df_container_from_metatlas_file(my_file)
print(df_container.keys())  # parenthesized form works in both Python 2 and 3
df = df_container['ms2_neg']
df = df[abs(df.rt-7.495)<0.001]
df.sort_values('i',axis=0,ascending=False,inplace=True)
df.head()
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
ax.set_xlabel('m/z',fontsize=12,weight='bold')
ax.set_ylabel('intensity',fontsize=12,weight='bold')
ax.vlines(df['mz'],np.zeros(len(df['mz'])),df['i'],colors='r',linewidth = 2)
labels = [1.001e9] # start with a sentinel m/z far outside the data so the first peak is always labeled
for i,row in df.iterrows():
if np.min(np.abs(row.mz - labels)) > 0.1 and row.i > 0.01 * df.i.max():
ax.annotate('%.4f'%row.mz, xy=(row.mz, 1.01*row.i),rotation = 90, horizontalalignment = 'center', verticalalignment = 'bottom')
labels.append(row.mz)
# plt.axhline()
# plt.tight_layout()
# L = plt.ylim()
ax.set_yscale('log')
plt.ylim(1e4,1e7)
###Output
_____no_output_____ |
SQ2EOM.ipynb | ###Markdown
From Second Quantization to Equation-of-Motion Coupled-Cluster using SymPy Table of contents 1. [Introduction](#Introduction) 2. [Second Quantization](#Second-Quantization) 3. [Normal product](#Normal-product) 4. [Contraction](#Contraction) 5. [Wicks theorem](#Wicks-theorem) 6. [Particle-Hole formalism](#Particle-Hole-formalism) 7. [Hartree-Fock](#Hartree-Fock) 8. [Baker-Campbell-Hausdorff](#Baker-Campbell-Hausdorff) 9. [Coupled-Cluster](#Coupled-Cluster) 10. [Equation-of-motion Coupled-Cluster](#Equation-of-motion-Coupled-Cluster) Introduction This is a hands-on tutorial for the derivation of the EOM-CCSD amplitude equations and the one- and two-particle density matrices. I have developed a symbolic library using [SymPy](https://www.sympy.org/en/index.html) for deriving analytical expressions which can be easily extended to any operator. First, we will derive the fermionic algebra from first quantization and study its properties. $\newcommand{\ket}[1]{\left|{#1}\right\rangle}\newcommand{\bra}[1]{\left\langle{#1}\right|}\newcommand{\braket}[2]{\left\langle{#1}\middle|{#2}\right\rangle}$ Second Quantization In this section we will derive the fermionic algebra in the second quantization representation from first quantization. Once the fermionic algebra is established, we will derive the relations between its elements.$$ \ket{k} = {a^{+}_{k}}\ket{vac} $$The operator $a^{+}_{k}$ creates an electron in orbital $k$ on the vacuum state:$$\phi_{k}(1) \longleftrightarrow {a^{+}_{k}}\ket{vac} $$$\ket{vac}$ is an abstract vacuum state with $\braket{vac}{vac} = 1$. If there are two electrons, the wavefunction in first quantization is $$ \Phi(1,2) = \frac{1}{\sqrt{2}} \begin{vmatrix}\phi_{i}(1) & \phi_{k}(1) \\\phi_{i}(2) & \phi_{k}(2) \end{vmatrix} $$The same state in second quantization follows as (normalization included) $$\ket{ik} = {a^{+}_{i}a^{+}_{k}}\ket{vac} $$ Since $\Phi(1,2)$ is an antisymmetric function, the creation operators in second quantization should respect this antisymmetry. $$\Phi(2,1) = - \Phi(1,2) \longleftrightarrow {a^{+}_{k}a^{+}_{i}}\ket{vac} = - {a^{+}_{i}a^{+}_{k}}\ket{vac} $$One can see that the antisymmetry requirement is fulfilled by the anticommutation relation between the creation operators:$$a^{+}_{k}a^{+}_{i} + a^{+}_{i}a^{+}_{k} = \{a^{+}_{k} , a^{+}_{i} \} = 0 \tag{QED 1} $$ Now we will repeat the same exercise for annihilation operators. Consider first a one-electron system obtained by creating an electron in the $i$th orbital with $a_{i}^{+}$. We can define the annihilation operator $a_{i}$, which acts on the former state and returns the system to $\ket{vac}$: $$a_{i} a_{i}^{+} \ket{vac} = \ket{vac}$$ When there is no electron in orbital $i$, the result should be zero: $$a_{i} \ket{vac} = 0$$In order to establish the anticommutation of the annihilation operators we will utilize the anticommutation relation of the creation operators:$$ a_{i} a_{k} \ket{ki} = \ket{vac} \ \ \ \& \ \ \ a_{k} a_{i} \ket{ik} = \ket{vac} = -a_{k} a_{i} \ket{ki}$$Therefore, $$ a_{i}a_{k} + a_{k}a_{i} = \{a_{k}, a_{i}\} = 0 \tag{QED 2}$$ Now we already have two anticommutation relations and will try to derive the third, between creation and annihilation operators.
Consider two orbitals $i \ \& \ k$, where $i\neq k$:$$a_{i}a_{k}^{+}\ket{i} = a_{i}\ket{ki} = - a_{i}\ket{ik} = - \ket{k}$$Reversing the action, $$a_{k}^{+} a_{i}\ket{i} = \ket{k}$$Comparing both, $$a_{k}^{+} a_{i} + a_{i}a_{k}^{+} = \{ a_{i}, a_{k}^{+} \} = 0 $$But when $i = k$, $$a_{i}^{+} a_{i} + a_{i}a_{i}^{+} = \{ a_{i}, a_{i}^{+} \} = 1 $$Therefore the final expression, which captures both situations, is $$a_{i}^{+} a_{k} + a_{k}a_{i}^{+} = \{ a_{i}^{+}, a_{k} \} = \delta_{ik} \tag{QED 3}$$ Last, we need to establish the adjoint relation between annihilation and creation operators. From first quantization, $$\braket {\phi_{i}}{\phi_{k}} = \delta_{ik}$$Using second quantization on the right-hand side, where $\dagger$ indicates the Hermitian conjugate, $$\bra{vac}(a_{i}^{+})^{\dagger} a_{k}^{+}\ket{vac} = \delta_{ik}$$This is only true if $$(a_{i}^{+})^{\dagger} = a_{i} \tag{QED 4}$$Hence $a_{i}^{+} = a_{i}^{\dagger}$, which means the creation operator for the $i$th orbital is the Hermitian conjugate of the annihilation operator for the same orbital. From now on, we will use $\dagger$ as the superscript for creation operators instead of $+$. Particle number operatorWe successfully transformed the basis from first quantization to second quantization by constructing the fermionic algebra. Now we will do the same for operators. Let's look at the number operator for $N$ fermions in $N$ orthogonal orbitals. The wavefunction can be written as $$\ket{\Psi_{N}} = a^{\dagger}_{N}a^{\dagger}_{N-1} ... a^{\dagger}_{2} a^{\dagger}_{1}\ket{vac}$$Define the number operator for the $k$th orbital: $$a^{\dagger}_{k}a_{k} $$Acting with this operator on $\ket{\Psi_{N}}$, $$a^{\dagger}_{k}a_{k} \ket{\Psi_{N}} = a^{\dagger}_{k}a_{k} (a^{\dagger}_{N}a^{\dagger}_{N-1} ... a^{\dagger}_{k} ... a^{\dagger}_{2} a^{\dagger}_{1}\ket{vac})$$Using eq. (1) and eq. (3), the pair $a^{\dagger}_{k}a_{k}$ can be moved next to $a^{\dagger}_{k}$ inside $\Psi_{N}$:$$a^{\dagger}_{k}a_{k} \ket{\Psi_{N}} = a^{\dagger}_{N}a^{\dagger}_{N-1} ...a^{\dagger}_{k}a_{k}a^{\dagger}_{k} ... a^{\dagger}_{2} a^{\dagger}_{1}\ket{vac} = a^{\dagger}_{N}a^{\dagger}_{N-1} ...a^{\dagger}_{k}... a^{\dagger}_{2} a^{\dagger}_{1}\ket{vac} = +1\ket{\Psi_{N}}$$ The particle number operator for $N$ particles then satisfies$$\sum_{i=1}^{N} a^{\dagger}_{i}a_{i} \ket{\Psi_{N}} = N \ket{\Psi_{N}} $$ Parity of permutation$$p q r s \xrightarrow[]{(12)(34)} q p s r$$ The total number of transpositions required to decompose a permutation $P$ is known as its parity, $\mathcal{p}$. If the total number is even, the parity is called even; if odd, the parity is called odd. The sign of a permutation $P$ is defined as $$sgn(P) = (-1)^{p}$$Hence,$$sgn(P_{odd}) = -1 \hspace{2cm} sgn(P_{even}) = 1$$ Normal product Given a product of operators, the normal product is defined by rearranging the operators such that all creation operators are moved to the left and all annihilation operators to the right, then multiplying by the sign of the permutation. Example: Consider $Q = a_{p}^{\dagger}a_{q}a_{r}^{\dagger}$. In order to write the normal product of $Q$, one has to permute $a_{q}a_{r}^{\dagger}$ and multiply the whole by -1 ($\because$ odd-parity permutation):$$n[a_{p}^{\dagger}a_{q}a_{r}^{\dagger}] = -a_{p}^{\dagger}a_{r}^{\dagger}a_{q} $$Exercise: $Q = a_{s}a_{p}^{\dagger}a_{r}a_{q}^{\dagger}$1) $n[Q]$ 2) $n[n[Q]]$ (the normal product is idempotent) Solution: $Q = a_{s}a_{p}^{\dagger}a_{r}a_{q}^{\dagger}$1) $n[Q]=-a_{q}^{\dagger}a_{p}^{\dagger}a_{r}a_{s}=a_{p}^{\dagger}a_{q}^{\dagger}a_{r}a_{s}$ 2) $n[n[Q]]=n[Q]$ Now we will compute the normal product using SymPy
###Code
from secondquant_gen import AnnihilateFermion, CreateFermion, NO
from sympy import symbols
p,q,r,s = symbols('p,q,r,s')
Q = AnnihilateFermion(s)*CreateFermion(p)*AnnihilateFermion(r)*CreateFermion(q)
display(Q)
n = NO(Q)
nn = NO(n)
display(n,nn)
###Output
_____no_output_____
###Markdown
Contraction The contraction of two operators is defined as the product itself minus its normal product. Symbolically Example: Compute the contraction of $Q = a_{p}a_{q}$. Exercise: Compute the contraction of the following: 1) $a_{p}a_{q}^{\dagger}$2) $a_{p}^{\dagger}a_{q}$3) $a_{p}^{\dagger}a_{q}^{\dagger}$ Solution:Now we will compute these contractions using SymPy
###Code
from secondquant_gen import contraction, F, Fd # F = AnnihilateFermion, Fd = CreateFermion
from sympy import symbols
p,q = symbols('p q')
C1 = contraction(F(p),F(q))
C2 = contraction(F(p),Fd(q))
C3 = contraction(Fd(p),F(q))
C4 = contraction(Fd(p),Fd(q))
display(C1,C2,C3,C4)
###Output
_____no_output_____
###Markdown
Normal products with ContractionsGiven a normal product that contains contractions of operators inside it, take all the contractions out in pairs (in the same order), leave the uncontracted operators inside the normal product, and multiply by the sign of the permutation. Example:  Example:  Wicks Theorem A product of operators can be rewritten as the normal product of the same operators plus normal products with all possible contractions inside. Example: Now we will demonstrate Wick's theorem using SymPy
###Code
from secondquant_gen import wicks, F, Fd
from sympy import symbols, Dummy
p,q,r,s,t,u = symbols('p q r s t u')
E1 = wicks(F(p)*Fd(q)*Fd(r))
display(E1)
E2 = wicks(F(p)*F(q)*Fd(r)*F(s)*Fd(t)*Fd(u))
display(E2)
###Output
_____no_output_____
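###Markdown
As a quick sanity check of eq. (3) (an added sketch, assuming the `wicks` helper of `secondquant_gen` behaves like `sympy.physics.secondquant.wicks`), we can expand the anticommutator $\{a_{p}, a_{q}^{\dagger}\}$ with Wick's theorem: the normal-ordered pieces of the two products cancel, leaving only $\delta_{pq}$.
###Code
from secondquant_gen import wicks, F, Fd
from sympy import symbols
p, q = symbols('p q')
# a_p a_q^dagger + a_q^dagger a_p: the NO(...) terms cancel pairwise,
# so only the Kronecker delta of eq. (3) survives
display(wicks(F(p)*Fd(q)) + wicks(Fd(q)*F(p)))
###Output
_____no_output_____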
###Markdown
Expectation value with $\ket{vac}$Let $Q = M_{1}M_{2}M_{3} ... M_{n-1}M_{n}$ be a product of $n$ operators. From this one can easily see the importance of Wick's theorem. Let's do some checks using SymPy. Exercise:$Q = a_{p}a_{q}^{\dagger}a_{r}^{\dagger}$$Q = a_{p}a_{q}a_{r}^{\dagger}a_{s}a_{t}^{\dagger}a_{u}^{\dagger}$Compute $\bra{vac}Q\ket{vac}$.Solution:1. Convert $Q$ to normal order using Wick's theorem.2. Only the fully contracted terms give a non-zero result.
###Code
from secondquant_gen import wicks, F, Fd
from sympy import symbols
p,q,r,s,t,u = symbols('p q r s t u')
E1 = wicks(F(p)*Fd(q)*Fd(r),keep_only_fully_contracted=True)
display(E1)
E2 = wicks(F(p)*F(q)*Fd(r)*F(s)*Fd(t)*Fd(u),keep_only_fully_contracted=True)
display(E2)
###Output
_____no_output_____
###Markdown
Particle Hole formalism This procedure makes the evaluation of certain matrix elements easier, such as $$\bra{\Phi_{0}}\hat{H}\ket{\Phi_{0}}$$$$\bra{\Phi_{i}^{a}}\hat{H}\ket{\Phi_{0}}$$$$\bra{\Phi_{ij}^{ab}}\hat{H}\ket{\Phi_{0}}$$where $\Phi_{0}$ is the Hartree-Fock determinant. Given a Hartree-Fock solution we get occupied and virtual orbitals. Let's assign symbols $(i,j,k,...)$ to occupied and $(a,b,c,...)$ to virtual orbitals. We will define a new set of operators $$ b_{p} =\left\{ \begin{array}{ll} a_{p}^{\dagger} & \mbox{if } p = i \\ a_{p} & \mbox{if } p = a \end{array}\right.$$$$ b_{p}^\dagger =\left\{ \begin{array}{ll} a_{p} & \mbox{if } p = i \\ a_{p}^{\dagger} & \mbox{if } p = a \end{array}\right.$$This makes the HF determinant a Fermi vacuum:$$ b_{p}\ket{\Phi_{0}} = 0$$It follows that $$ \{ b_{p},b_{q} \} = 0 \hspace{1cm} \{ b_{p}^{\dagger},b_{q}^{\dagger} \} = 0 \hspace{1cm} \{ b_{p},b_{q}^\dagger \} = \delta_{pq} $$ Normal product of PHFIt is defined similarly, as a rearrangement of the operators ($b_{p}$ \& $b_{q}^{\dagger}$) such that all creation operators are moved to the left and all annihilation operators to the right, multiplied by the sign of the permutation. Example: Consider $R = b_{p}^{\dagger}b_{q}b_{r}^{\dagger}$. In order to write the normal product of $R$, one has to permute $b_{q}b_{r}^{\dagger}$ and multiply the whole by -1 ($\because$ odd-parity permutation):$$N[b_{p}^{\dagger}b_{q}b_{r}^{\dagger}] = -b_{p}^{\dagger}b_{r}^{\dagger}b_{q} $$Note that this definition gives a different result than $n[\,]$. Let's do an exercise to illustrate this. We are going to compute the normal order of $a_{p}^{\dagger}a_{q}a_{r}^{\dagger}$ for $p=i,q=j,r=a$:$$N[a_{i}^{\dagger}a_{j}a_{a}^{\dagger}] = N[b_{i}b_{j}^{\dagger}b_{a}^{\dagger}] = b_{j}^{\dagger}b_{a}^{\dagger}b_{i} = -b_{a}^{\dagger}b_{j}^{\dagger}b_{i} = -a_{a}^{\dagger}a_{j}a_{i}^{\dagger} $$Also compute $$n[a_{i}^{\dagger}a_{j}a_{a}^{\dagger}] = -a_{i}^{\dagger}a_{a}^{\dagger}a_{j} = a_{a}^{\dagger}a_{i}^{\dagger}a_{j} = a_{a}^{\dagger}\delta_{ij} - a_{a}^{\dagger}a_{j}a_{i}^{\dagger} $$
###Code
from sympy.physics.secondquant import NO, F, Fd # F = AnnihilateFermion, Fd = CreateFermion
from sympy import symbols
i,j = symbols('i j', below_fermi=True)
a = symbols('a', above_fermi=True)
display(NO(Fd(i)*F(j)*Fd(a)))
###Output
_____no_output_____
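###Markdown
Contractions can likewise be taken relative to the Fermi vacuum; a minimal check (an added sketch using the same built-in SymPy module) shows which pairs survive in the particle-hole picture:
###Code
from sympy.physics.secondquant import contraction, F, Fd
from sympy import symbols
i, j = symbols('i j', below_fermi=True)   # occupied (hole) indices
a, b = symbols('a b', above_fermi=True)   # virtual (particle) indices
display(contraction(Fd(i), F(j)))  # hole contraction     -> KroneckerDelta(i, j)
display(contraction(F(a), Fd(b)))  # particle contraction -> KroneckerDelta(a, b)
display(contraction(F(i), Fd(j)))  # vanishes relative to the Fermi vacuum
display(contraction(Fd(a), F(b)))  # vanishes relative to the Fermi vacuum
###Output
_____no_output_____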
###Markdown
Contractions in PHF They are defined the same way, but with the PHF operators. Contractions inside normal products and Wick's theorem are defined analogously. Expectation value with $\ket{\Phi_{0}}$Let $R = M_{1}M_{2}M_{3} ... M_{n-1}M_{n}$ be a product of $n$ operators. From this one can easily see the importance of Wick's theorem. Let's do some checks using SymPy and derive the Hartree-Fock energy. Electronic Hamiltonian$$H_{el} = \sum_{pq} h_{pq} a_{p}^{\dagger} a_{q} + \frac{1}{4}\sum_{pqrs} v^{pq}_{rs} a_{p}^{\dagger}a_{q}^{\dagger}a_{s}a_{r}$$where $$h_{pq} = \bra{\phi_{p} (x)}(-\frac{1}{2}\nabla^{2} -\sum_{\alpha} \frac{Z_{\alpha}}{r_{\alpha x}} )\ket{\phi_{q} (x)}$$$$v_{pqrs} = \bra{\phi_{p} \phi_{q}}\ket{\phi_{r} \phi_{s}} = \braket{\phi_{p} \phi_{q}}{\phi_{r}\phi_{s}} - \braket{\phi_{p} \phi_{q}}{\phi_{s}\phi_{r}}$$ Hartree-FockLet's find the Hartree-Fock energy
###Code
from sympy.physics.secondquant import wicks, F, Fd, NO # F = AnnihilateFermion, Fd = CreateFermion
from sympy.physics.secondquant import substitute_dummies, contraction
from sympy.physics.secondquant import AntiSymmetricTensor, evaluate_deltas
from sympy import symbols, Rational, Dummy
p, q, r, s = symbols('p,q,r,s', cls=Dummy)
h = AntiSymmetricTensor('h', (p,), (q,))
pq = Fd(p)*F(q)
v = AntiSymmetricTensor('v', (p, q), (r, s))
pqsr = Fd(p)*Fd(q)*F(s)*F(r)
H = h*pq + Rational(1, 4)*v*pqsr
display(H) # Without summation
eq = wicks(H,keep_only_fully_contracted=True)
index_rule = {'below': 'ijklmno','above': 'abcde', 'general': 'pqrs'}
eq = substitute_dummies(eq, new_indices=True, pretty_indices=index_rule)
display(eq)
eq = evaluate_deltas(eq)
display(eq)
###Output
_____no_output_____
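###Markdown
The reference expectation value of the normal-ordered Hamiltonian introduced below vanishes by construction, consistent with $H_{el} = H_{N} + E_{HF}$; a quick check (an added sketch reusing the tensors and imports from the previous cell):
###Code
# normal-ordered Hamiltonian built from the same one- and two-body tensors
HN = h*NO(Fd(p)*F(q)) + Rational(1, 4)*v*NO(Fd(p)*Fd(q)*F(s)*F(r))
# Wick's theorem forbids contractions inside a normal-ordered string,
# so the fully contracted part (the Fermi-vacuum expectation value) is zero
display(wicks(HN, keep_only_fully_contracted=True))
###Output
_____no_output_____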
###Markdown
Normal ordered Hamiltonian$$H_{N} = \sum_{pq} h_{pq} N[a_{p}^{\dagger} a_{q}] + \frac{1}{4}\sum_{pqrs} v^{pq}_{rs} N[a_{p}^{\dagger}a_{q}^{\dagger}a_{s}a_{r}]$$One can show that $$H_{el} = H_{N} + E_{HF}$$From now on we will use $H_{N}$ for computing matrix elements; $H_{N}$ also directly measures the correlation effects. Baker-Campbell-Hausdorff$$e^{-B} A e^{B} = A + [A,B] + \frac{1}{2!}[[A,B],B] + \frac{1}{3!}[[[A,B],B],B] + ... $$If $A$ is the electronic (or normal-ordered) Hamiltonian and $B$ is any excitation operator, then, due to the two-body structure of the Hamiltonian, the series truncates after the fourth commutator, so the coupled-cluster equations contain only a finite number of terms. Coupled-ClusterCoupled Cluster Singles and Doubles (CCSD)$$\Psi_{CCSD} = e^{T} \ket{\Phi_{0}}$$where $T = T_{1} + T_{2}$ with$$ T_{1} = \sum_{ia}t_{i}^{a}a^{\dagger}i$$$$ T_{2} = \frac{1}{4}\sum_{ijab}t_{ij}^{ab}a^{\dagger}b^{\dagger}ji$$(shorthand: $a^{\dagger}, b^{\dagger}$ create electrons in virtual orbitals, while $i, j$ annihilate electrons from occupied orbitals). The equations to be solved are$$ E_{CCSD}^{cor} = \bra{\Phi_{0}}e^{-T} H_{N} e^{T} \ket{\Phi_{0}} \tag{Correlation energy}$$ $$ 0 = \bra{\Phi_{i}^{a}}e^{-T} H_{N} e^{T} \ket{\Phi_{0}} \tag{$T_{1}$ amplitude}$$ $$ 0 = \bra{\Phi_{ij}^{ab}}e^{-T} H_{N} e^{T} \ket{\Phi_{0}} \tag{$T_{2}$ amplitude}$$ Let's see the power of SymPy now.
###Code
from sympy.physics.secondquant import wicks, F, Fd, NO # F = AnnihilateFermion, Fd = CreateFermion
from sympy.physics.secondquant import substitute_dummies, contraction
from sympy.physics.secondquant import AntiSymmetricTensor, evaluate_deltas
from sympy.physics.secondquant import PermutationOperator, simplify_index_permutations
from sympy import symbols, Rational, Dummy
import BCH
p, q, r, s = symbols('p,q,r,s', cls=Dummy)
i,j = symbols('i,j' , below_fermi=True)
a,b = symbols('a,b' , above_fermi=True)
f = AntiSymmetricTensor('f', (p,), (q,))
pq = NO(Fd(p)*F(q))
v = AntiSymmetricTensor('v', (p, q), (r, s))
pqsr = NO(Fd(p)*Fd(q)*F(s)*F(r))
H = f*pq + Rational(1, 4)*v*pqsr
ccsd = BCH.level(H,"SD")
eq = wicks(ccsd,simplify_kronecker_deltas=True,keep_only_fully_contracted=True)
index_rule = {'below': 'ijklmno','above': 'abcdef', 'general': 'pqrs'}
e_ccsd = substitute_dummies(eq, new_indices=True, pretty_indices=index_rule)
display(e_ccsd)
eq = wicks(Fd(i)*F(a)*ccsd,simplify_kronecker_deltas=True,keep_only_fully_contracted=True)
index_rule = {'below': 'jklmno','above': 'bcdef', 'general': 'pqrs'}
t1 = (substitute_dummies(eq, new_indices=True, pretty_indices=index_rule))
display(t1)
eq = wicks(Fd(i)*Fd(j)*F(b)*F(a)*ccsd,simplify_kronecker_deltas=True,keep_only_fully_contracted=True)
index_rule = {'below': 'klmno','above': 'cdef', 'general': 'pqrs'}
t2 = substitute_dummies(eq, new_indices=True, pretty_indices=index_rule)
P = PermutList = [PermutationOperator(i,j),PermutationOperator(a,b)]
t2 = simplify_index_permutations(t2,PermutList)
display(t2)
###Output
_____no_output_____
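###Markdown
The derived expressions are ordinary SymPy objects, so they can be exported, e.g. as LaTeX source for a manuscript or for code generation (a small convenience sketch):
###Code
from sympy import latex
# LaTeX source of the CCSD correlation energy and the T1 residual derived above
print(latex(e_ccsd))
print(latex(t1))
###Output
_____no_output_____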
###Markdown
Equation-of-motion Coupled-ClusterEOM-CC allows one to compute electronically excited states and states with open-shell character. There are many flavors, depending on the target state reached from the reference. Energy and amplitude equations of EOM-CC$$\bar{H} = e^{-T} H_{N} e^{T}$$$$\bra{\Phi_{ij..}^{ab..}}\bar{H}\hat{R}\ket{\Phi_{0}} = E_{EOM} \bra{\Phi_{ij..}^{ab..}}\hat{R}\ket{\Phi_{0}}$$$$\bra{\Phi_{ij..}^{ab..}}[\bar{H}-E_{cc},\hat{R}]\ket{\Phi_{0}} = \Delta E_{EOM} \bra{\Phi_{ij..}^{ab..}}\hat{R}\ket{\Phi_{0}}$$The last equation is the one we will derive using SymPy. Example - EOM-IP-CCSD using the generalized Davidson method$$\begin{pmatrix} \bar{H}_{SS} - E_{cc} & \bar{H}_{SD} \\ \bar{H}_{DS} & \bar{H}_{DD} - E_{cc} \end{pmatrix} \begin{pmatrix} R_{1}\\ R_{2} \end{pmatrix}=\omega \begin{pmatrix} R_{1}\\ R_{2} \end{pmatrix}$$ and $$\begin{pmatrix} L_{1} & & L_{2} \end{pmatrix} \begin{pmatrix} \bar{H}_{SS}-E_{cc} & \bar{H}_{SD} \\ \bar{H}_{DS} & \bar{H}_{DD}-E_{cc} \end{pmatrix} = \omega \begin{pmatrix} L_{1} & & L_{2} \end{pmatrix} $$ Using matrix equations We need the right $\sigma$ amplitudes of EOM-IP-CCSD, and for this we need $$\bra{\Phi_{i}}[\bar{H}-E_{cc},\hat{R_{1}}]\ket{\Phi_{0}} = ((\bar{H}_{SS} - E_{cc}) R_{1})$$$$\bra{\Phi_{ij}^{a}}[\bar{H},\hat{R_{1}}]\ket{\Phi_{0}} = (\bar{H}_{DS} R_{1}) $$$$\bra{\Phi_{i}}[\bar{H},\hat{R_{2}}]\ket{\Phi_{0}} = (\bar{H}_{SD} R_{2})$$$$\bra{\Phi_{ij}^{a}}[\bar{H}-E_{cc},\hat{R_{2}}]\ket{\Phi_{0}} =((\bar{H}_{DD} - E_{cc}) R_{2}) $$Therefore, the trial vectors are defined as $$\sigma_{1}=((\bar{H}_{SS}-E_{cc})R_{1})+(\bar{H}_{SD}R_{2})$$$$\sigma_{2}=(\bar{H}_{DS}R_{1}) +((\bar{H}_{DD}-E_{cc})R_{2})$$The left vectors are defined similarly. Let's use the power of SymPy for the right $\sigma$ amplitudes of EOM-IP-CCSD
###Code
import EOM,SIGMA
flavor1 = "IP"
R0_f1 = EOM.R0(flavor1)
R1_f1 = EOM.R1(flavor1)
R2_f1 = EOM.R2(flavor1)
Rf1 = R0_f1 + R1_f1 + R2_f1
SIGMA.RVECTORS(R0_f1,R1_f1,R2_f1,flavor1)
L0_f1 = EOM.L0(flavor1)
L1_f1 = EOM.L1(flavor1)
L2_f1 = EOM.L2(flavor1)
Lf1 = L0_f1 + L1_f1 + L2_f1
SIGMA.LVECTORS(L0_f1,L1_f1,L2_f1,flavor1)
###Output
_____no_output_____
###Markdown
Properties One-particle density matrix (OPDM)$$\gamma^I_{pq}= \bra{\Psi_{I}}p^{\dagger}q\ket{\Psi_{I}}$$ One-particle transition density matrix (OPTDM)$$\gamma^{IJ}_{pq}= \bra{\Psi_{I}}p^{\dagger}q\ket{\Psi_{J}}$$
###Code
import EOM, DM, TDM
flavor1 = "IP"
R0_f1 = EOM.R0(flavor1)
R1_f1 = EOM.R1(flavor1)
R2_f1 = EOM.R2(flavor1)
Rf1 = R0_f1 + R1_f1 + R2_f1
L0_f1 = EOM.L0(flavor1)
L1_f1 = EOM.L1(flavor1)
L2_f1 = EOM.L2(flavor1)
Lf1 = L0_f1 + L1_f1 + L2_f1
DM.OPDM(Lf1,Rf1,flavor1)
flavor2 = "CCSD"
R0_f2 = EOM.R0(flavor2)
R1_f2 = EOM.R1(flavor2)
R2_f2 = EOM.R2(flavor2)
Rf2 = R0_f2 + R1_f2 + R2_f2
L0_f2 = EOM.L0(flavor2)
L1_f2 = EOM.L1(flavor2)
L2_f2 = EOM.L2(flavor2)
Lf2 = L0_f2 + L1_f2 + L2_f2
TDM.OPTDM(Lf1,Rf1,Lf2,Rf2,flavor1,flavor2)
###Output
_____no_output_____ |
docs/PROSYN2022.ipynb | ###Markdown
 *Mohammad D. Ashkezari*, *Ginger Armbrust* ProSynFest, March 2022 Table of Contents:* [Installation](installation)* [**Data Retrieval (selected methods)**](dataRetrieval) * [API](api) * [Catalog](catalog) * [Search Catalog](searchCatalog) * [List of Cruises](cruises) * [Cruise Trajectory](cruiseTrajectory) * [Retrieve Dataset](getDataset) * [Subset by Space-Time](spaceTime) * [Colocalize](matchCruise) * [List of Pre-Colocalized Datasets](datasetsWithAncillary) * [Retrieve Dataset With Pre-Colocalized Data](getDatasetWithAncillary) * [Dynamic Climatology](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_climatology.htmlclimatology) * [Custom SQL Query](query) * [**Data Visualization (selected methods)**](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_data_vizualization.html) * [Histogram](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_histogram.htmlhistogram) * [Time Series](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_time_series.htmltimeseries) * [Regional Map, Contour Plot, 3D Surface Plot](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_rm_cp_3d.htmlrmcp3d) * [Section Map, Section Contour](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_section_map_contour.htmlsectionmapcontour) * [Depth Profile](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_depth_profile.htmldepthprofile) * [Cruise Track](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_cruise_track.htmlcruisetrackplot) See Documentation For More:
###Code
from IPython.display import IFrame
IFrame("https://cmap.readthedocs.io/en/latest/user_guide/API_ref/api_ref.html", width=1400, height=1000)
###Output
_____no_output_____
###Markdown
API: Data Retrieval Table of Contents Installationpycmap can be installed using *pip*: `pip install pycmap`In order to use pycmap, you will need to obtain an API key from the SimonsCMAP website: https://simonscmap.com. Note:You may install pycmap on cloud-based jupyter notebooks (such as [Colab](https://colab.research.google.com/)) by running the following command in a code-block: `!pip install pycmap`
###Code
# !pip install pycmap -q #uncomment to install pycmap on Colab
import pycmap
pycmap.__version__
###Output
_____no_output_____
###Markdown
Table of Contents [*API( )*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_api.htmlpycmapapi)To retrieve data, we need to create an instance of the system's API and pass the API key. It is not necessary to pass the API key every time you run pycmap, because the key will be stored locally. The API class has other optional parameters to adjust its behavior. All parameters can be updated persistently at any point in the code.Register at https://simonscmap.com and get an API key, if you haven't already.
###Code
api = pycmap.API(token="YOUR_API_KEY")
###Output
_____no_output_____
###Markdown
Table of Contents [*get_catalog()*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_catalog.htmlgetcatalog)Returns a dataframe containing the details of all variables at Simons CMAP database. This method requires no input.
###Code
api.get_catalog()
###Output
_____no_output_____
###Markdown
Table of Contents [*search_catalog(keywords)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_search_catalog.htmlsearchcatalog)Returns a dataframe containing a subset of Simons CMAP catalog of variables. All variables at Simons CMAP catalog are annotated with a collection of semantically related keywords. This method takes the passed keywords and returns all of the variables annotated with similar keywords. The passed keywords should be separated by blank space. The search result is not sensitive to the order of keywords and is not case sensitive. The passed keywords can provide any 'hint' associated with the target variables. Below are a few examples: * the exact variable name (e.g. NO3), or its linguistic term (Nitrate) * methodology (model, satellite ...), instrument (CTD, seaflow), or disciplines (physics, biology ...) * the cruise official name (e.g. KOK1606), or unofficial cruise name (Falkor) * the name of data producer (e.g Penny Chisholm) or institution name (MIT) If you searched for a variable with semantically-related-keywords and did not get the correct results, please let us know. We can update the keywords at any point. Example:Returns a list of model-based nitrate variables, if any exist.
###Code
api.search_catalog('nitrate model')
###Output
_____no_output_____
###Markdown
Table of Contents [*cruises()*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_list_cruises.htmllist-cruises)Returns a dataframe containing the list of cruises registered at Simons CMAP.
###Code
api.cruises()
###Output
_____no_output_____
###Markdown
Table of Contents [*cruise_trajectory(cruiseName)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_cruise_trajectory.htmlcruise-traj)Returns a dataframe containing the trajectory of the specified cruise. > **Parameters:** >> **cruiseName: string**>> The official cruise name. If applicable, you may also use cruise “nickname” (‘Diel’, ‘Gradients_1’ …). A full list of cruise names can be retrieved using the `cruises()` method.>> >**Returns:** >> Pandas dataframe.
###Code
api.cruise_trajectory('KM1712')
from pycmap.viz import plot_cruise_track
plot_cruise_track(['KM1712'])
###Output
KM1712 cruise track retrieved.
###Markdown
Table of Contents [*cruise_variables(cruiseName)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_cruise_variables.htmlcruisevars)Returns a dataframe containing all registered variables (at Simons CMAP) during the specified cruise.> **Parameters:** >> **cruiseName: string**>> The official cruise name. If applicable, you may also use cruise “nickname” (‘Diel’, ‘Gradients_1’ …). A full list of cruise names can be retrieved using the `cruises()` method.>> >**Returns:** >> Pandas dataframe. Example:Returns a list of measured variables during the KM1712 cruise.
###Code
api.cruise_variables('KM1712')
###Output
_____no_output_____
###Markdown
Table of Contents [*get_dataset(tableName)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_retrieve_dataset.htmlretrieve-dataset)Returns the entire dataset. Note that this method does not return the dataset metadata. Use the get_dataset_metadata method to get the dataset metadata.> **Parameters:** >> **tableName: string**>> Table name (each dataset is stored in a table). A full list of table names can be found in [catalog](https://simonscmap.com/catalog).>> >**Returns:** >> Pandas dataframe.
###Code
api.get_dataset("tblAMT13_Chisholm")
###Output
_____no_output_____
###Markdown
Table of Contents [*space_time(table, variable, dt1, dt2, lat1, lat2, lon1, lon2, depth1, depth2)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_subset_ST.htmlsubset-st)Returns a subset of data according to the specified space-time constraints (dt1, dt2, lat1, lat2, lon1, lon2, depth1, depth2).The results are ordered by time, lat, lon, and depth (if exists), respectively. > **Parameters:** >> **table: string**>> Table name (each dataset is stored in a table). A full list of table names can be found in [catalog](https://simonscmap.com/catalog).>> >> **variable: string**>> Variable short name which directly corresponds to a field name in the table. A subset of this variable is returned by this method according to the spatio-temporal cut parameters (below). Pass **'\*'** wild card to retrieve all fields in a table. A full list of variable short names can be found in [catalog](https://simonscmap.com/catalog).>> >> **dt1: string**>> Start date or datetime. This parameter sets the lower bound of the temporal cut. Example values: '2016-05-25' or '2017-12-10 17:25:00'>> >> **dt2: string**>> End date or datetime. This parameter sets the upper bound of the temporal cut. >> >> **lat1: float**>> Start latitude [degree N]. This parameter sets the lower bound of the meridional cut. Note latitude ranges from -90° to 90°.>> >> **lat2: float**>> End latitude [degree N]. This parameter sets the upper bound of the meridional cut. Note latitude ranges from -90° to 90°.>> >> **lon1: float**>> Start longitude [degree E]. This parameter sets the lower bound of the zonal cut. Note longitude ranges from -180° to 180°.>> >> **lon2: float**>> End longitude [degree E]. This parameter sets the upper bound of the zonal cut. Note longitude ranges from -180° to 180°.>> >> **depth1: float**>> Start depth [m]. This parameter sets the lower bound of the vertical cut. Note depth is a positive number (it is 0 at surface and grows towards ocean floor).>> >> **depth2: float**>> End depth [m]. This parameter sets the upper bound of the vertical cut. Note depth is a positive number (it is 0 at surface and grows towards ocean floor).>**Returns:** >> Pandas dataframe. Example:This example retrieves a subset of in-situ salinity measurements by [Argo floats](https://cmap.readthedocs.io/en/latest/catalog/datasets/Argo.htmlargo).
###Code
api.space_time(
table='tblArgoMerge_REP',
variable='argo_merge_salinity_adj',
dt1='2015-05-01',
dt2='2015-05-30',
lat1=28,
lat2=38,
lon1=-71,
lon2=-50,
depth1=0,
depth2=100
)
###Output
_____no_output_____
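###Markdown
The returned object is an ordinary pandas dataframe ordered by time, lat, lon, and depth, so standard pandas operations apply directly. For instance, a rough mean-salinity depth profile (an added sketch; the subset is requested again here only so that it is bound to a variable):
###Code
df = api.space_time(
    table='tblArgoMerge_REP',
    variable='argo_merge_salinity_adj',
    dt1='2015-05-01',
    dt2='2015-05-30',
    lat1=28,
    lat2=38,
    lon1=-71,
    lon2=-50,
    depth1=0,
    depth2=100
    )
# average salinity within 10 m depth bins
profile = df.groupby(df['depth'].round(-1))['argo_merge_salinity_adj'].mean()
print(profile)
###Output
_____no_output_____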
###Markdown
Table of Contents [*along_track(cruise, targetTables, targetVars, depth1, depth2, temporalTolerance, latTolerance, lonTolerance, depthTolerance)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_match_cruise_track_datasets.htmlmatchcruise)This method colocalizes a cruise trajectory with the specified target variables. The matching results rely on the tolerance parameters because these parameters set the matching boundaries between the cruise trajectory and target datasets. Please note that the number of matching entries for each target variable might vary depending on the temporal and spatial resolutions of the target variable. In principle, if the cruise trajectory is fully covered by the target variable's spatio-temporal range, there should always be matching results if the tolerance parameters are larger than half of their corresponding spatial/temporal resolutions. Please explore the [catalog](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog) to find appropriate target variables to colocalize with the desired cruise. This method returns a dataframe containing the cruise trajectory joined with the target variable(s). > **Parameters:** >> **cruise: string**>> The official cruise name. If applicable, you may also use cruise "nickname" ('Diel', 'Gradients_1' ...). A full list of cruise names can be retrieved using the [cruises](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog) method.>> >> **targetTables: list of string**>> Table names of the target datasets to be matched with the cruise trajectory. Notice cruise trajectory can be matched with multiple target datasets. A full list of table names can be found in [catalog](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog).>> >> **targetVars: list of string**>> Variable short names to be matched with the cruise trajectory. A full list of variable short names can be found in [catalog](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog).>> >> **depth1: float**>> Start depth [m]. This parameter sets the lower bound of the depth cut on the target datasets. 'depth1' and 'depth2' allow matching a cruise trajectory (which is at the surface, hopefully!) with target variables at lower depth. Note depth is a positive number (depth is 0 at surface and grows towards ocean floor).>> >> **depth2: float**>> End depth [m]. This parameter sets the upper bound of the depth cut on the target datasets. Note depth is a positive number (depth is 0 at surface and grows towards ocean floor).>> >> **temporalTolerance: list of int**>> Temporal tolerance values between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single integer value is given, that would be applied to all target datasets. This parameter is in day units except when the target variable represents monthly climatology data in which case it is in month units. Notice fractional values are not supported in the current version.>> >> **latTolerance: list of float or int**>> Spatial tolerance values in meridional direction [deg] between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single float value is given, that would be applied to all target datasets. A "safe" value for this parameter can be slightly larger than half of the target variable's spatial resolution.>> >> **lonTolerance: list of float or int**>> Spatial tolerance values in zonal direction [deg] between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single float value is given, that would be applied to all target datasets. A "safe" value for this parameter can be slightly larger than half of the target variable's spatial resolution.>> >> **depthTolerance: list of float or int**>> Spatial tolerance values in vertical direction [m] between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single float value is given, that would be applied to all target datasets. >**Returns:** >> Pandas dataframe. Example:Colocalizes the Gradients_3 cruise with the abundance_prochloro and PO4_darwin_clim variables from the SeaFlow and Darwin (climatology) datasets, respectively.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pycmap
api = pycmap.API()
df = api.along_track(
cruise='gradients_3',
targetTables=['tblSeaFlow', 'tblDarwin_Nutrient_Climatology'],
targetVars=['abundance_prochloro', 'PO4_darwin_clim'],
depth1=0,
depth2=5,
temporalTolerance=[0, 0],
latTolerance=[0.01, 0.25],
lonTolerance=[0.01, 0.25],
depthTolerance=[5, 5]
)
################# Simple Plot #################
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
c1, c2 = 'firebrick', 'slateblue'
t1, t2 = 'tblSeaFlow', 'tblDarwin_Nutrient_Climatology'
v1, v2 = 'abundance_prochloro', 'PO4_darwin_clim'
ax1.plot(df['lat'], df[v1], 'o', color=c1, markeredgewidth=0, label='SeaFlow', alpha=0.2)
ax1.tick_params(axis='y', labelcolor=c1)
ax1.set_ylabel(v1 + api.get_unit(t1, v1), color=c1)
ax2.plot(df['lat'], df[v2], 'o', color=c2, markeredgewidth=0, label='Darwin', alpha=0.2)
ax2.tick_params(axis='y', labelcolor=c2)
ax2.set_ylabel(v2 + api.get_unit(t2, v2), color=c2)
ax1.set_xlabel('Latitude')
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Table of Contents Custom Colocalization:Sample(source, targets, replaceWithMonthlyClimatolog)Samples the target datasets using the time-location of the source dataset. Returns a dataframe containing the original source data and the joined colocalized target variables.> **Parameters:** >> **source: dataframe**>> A dataframe containing the source datasets (must have time-location columns).>> >> **targets: dict**>> A dictionary containing the target table/variables and tolerance parameters. The items in the `tolerances` list are: temporal tolerance [days], meridional tolerance [deg], >> zonal tolerance [deg], and vertical tolerance [m], respectively.>> Below is an example of the `targets` parameter:>> targets = {>> "tblSST_AVHRR_OI_NRT": {>> "variables": ["sst"],>> "tolerances": [1, 0.25, 0.25, 5]>> },>> "tblAltimetry_REP": {>> "variables": ["sla", "adt", "ugosa", "vgosa"],>> "tolerances": [1, 0.25, 0.25, 5]>> }>> }>> >> **replaceWithMonthlyClimatolog: boolean**>> If `True`, monthly climatology of the target variables is colocalized when the target dataset's temporal range does not cover the source data. If `False`, only contemporaneous target data are colocalized. >> >**Returns:** >> Pandas dataframe.
###Code
targets = {
"tblSST_AVHRR_OI_NRT": {
"variables": ["sst"],
"tolerances": [0, 0.25, 0.25, 0]
}
}
pycmap.Sample(
source=api.get_dataset("tblAMT13_Chisholm"),
targets=targets,
replaceWithMonthlyClimatolog=True
)
###Output
Gathering metadata ....
Sampling starts.
282 / 283 ... sampling tblSST_AVHRR_OI_NRT
Sampling ends.
###Markdown
Table of Contents [*datasets_with_ancillary()*](https://github.com/simonscmap/pycmap/blob/master/docs/DatasetsWithAncillary.ipynb)Returns a dataframe containing the list of data sets that have been automatically colocalized with ancillary variables (mostly environmental).A growing number of Simons CMAP datasets are being automatically colocalized with a large (100+) number of ancillary parameters derived from satellite and numerical model products.>**Returns:** >> Pandas dataframe.
###Code
api.datasets_with_ancillary()
###Output
_____no_output_____
###Markdown
Table of Contents [*get_dataset_with_ancillary(tableName)*](https://github.com/simonscmap/pycmap/blob/master/docs/RetrieveDatasetWithAncillary.ipynb)Returns the entire dataset joined with colocalized ancillary variables. The ancillary variable names are prefixed with `CMAP_`.Note that this method does not return the dataset metadata. Use the get_dataset_metadata method to get the dataset metadata.> **Parameters:** >> **tableName: string**>> The name of table associated with the dataset. A full list of table names can be found in the [Data Catalog](https://simonscmap.com/catalog).>> >**Returns:** >> Pandas dataframe.
###Code
api.get_dataset_with_ancillary("tblAMT13_Chisholm")
###Output
_____no_output_____
###Markdown
Table of Contents [*query(query)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_query.htmlquery)Simons CMAP datasets are hosted in a SQL database, and the pycmap package provides the user with a number of pre-developed methods to extract and retrieve subsets of the data. The rest of this documentation is dedicated to exploring and explaining these methods. In addition to the pre-developed methods, we intend to leave the database open to custom scan queries for interested users. This method takes a custom SQL query statement and returns the results in the form of a Pandas dataframe. The full list of table names and variable names (fields) can be obtained using the [get_catalog()](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_catalog.htmlgetcatalog) method. In fact, one may use this very method to retrieve the table and field names: `query('EXEC uspCatalog')`. A Dataset is stored in a table and each table field represents a variable. All data tables have the following fields:* [time] [date or datetime] NOT NULL,* [lat] [float] NOT NULL,* [lon] [float] NOT NULL,* [depth] [float] NOT NULL, Note:Tables which represent a climatological dataset, such as 'tblDarwin_Nutrient_Climatology', will not have a 'time' field. Also, if a table represents a surface dataset, such as satellite products, there would be no 'depth' field. 'depth' is a positive number in meters unit; it is zero at the surface growing towards the ocean's floor. 'lat' and 'lon' are in degrees units, ranging from -90° to 90° and -180° to 180°, respectively.Please keep in mind that some of the datasets are massive in size (10s of TB); avoid queries without a WHERE clause (`SELECT * FROM TABLENAME`). Always try to add some constraints on time, lat, lon, and depth fields (see the basic examples below). Moreover, the database hosts a wide range of predefined stored procedures and functions to streamline nearly all CMAP data services. For instance retrieving the catalog information is achieved using a single call of this procedure: *uspCatalog*. These predefined procedures can be called using the pycmap package (see example below). Alternatively, one may use any SQL client to execute these procedures to retrieve and visualize data (examples: [Azure Data Studio](https://docs.microsoft.com/en-us/sql/azure-data-studio/download?view=sql-server-ver15), or [Plotly Falcon](https://plot.ly/free-sql-client-download/)). Using the predefined procedures, all CMAP data services are centralized at the database layer, which dramatically facilitates the process of developing apps with different programming languages (pycmap, web app, cmap4r, ...). Please note that you can improve the current procedures or add new procedures by contributing at the [CMAP database repository](https://github.com/simonscmap/DB). Below is a selected list of stored procedures and functions; their arguments will be described in more detail subsequently:* uspCatalog* uspSpaceTime* uspTimeSeries* uspDepthProfile* uspSectionMap* uspCruises* uspCruiseByName* uspCruiseBounds* uspWeekly* uspMonthly* uspQuarterly* uspAnnual* uspMatch* udfDatasetReferences* udfMetaData_NoRef Happy SQL Injection! Example:A sample stored procedure returning the list of all cruises hosted by Simons CMAP.
###Code
api.query('EXEC uspCruises')
###Output
_____no_output_____
###Markdown
Example:A sample query returning the timeseries of sea surface temperature (sst).
###Code
api.query(
'''
SELECT [time], AVG(lat) AS lat, AVG(lon) AS lon, AVG(sst) AS sst FROM tblsst_AVHRR_OI_NRT
WHERE
[time] BETWEEN '2016-06-01' AND '2016-10-01' AND
lat BETWEEN 23 AND 24 AND
lon BETWEEN -160 AND -158
GROUP BY [time]
ORDER BY [time]
'''
)
###Output
_____no_output_____
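###Markdown
The query result is also an ordinary pandas dataframe and can be plotted directly; for example (an added sketch, re-issuing the query above only so the result is bound to a variable):
###Code
df = api.query(
    '''
    SELECT [time], AVG(lat) AS lat, AVG(lon) AS lon, AVG(sst) AS sst FROM tblsst_AVHRR_OI_NRT
    WHERE
    [time] BETWEEN '2016-06-01' AND '2016-10-01' AND
    lat BETWEEN 23 AND 24 AND
    lon BETWEEN -160 AND -158
    GROUP BY [time]
    ORDER BY [time]
    '''
    )
# regionally averaged sea surface temperature as a simple time series
df.plot(x='time', y='sst', figsize=(10, 4), legend=False)
###Output
_____no_output_____ |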
###Markdown
 *Mohammad D. Ashkezari*, *Ginger Armbrust* ProSynFest, March 2022 Table of Contents:* [Installation](installation)* [**Data Retrieval (selected methods)**](dataRetrieval) * [API](api) * [Catalog](catalog) * [Search Catalog](searchCatalog) * [List of Cruises](cruises) * [Cruise Trajectory](cruiseTrajectory) * [Retrieve Dataset](getDataset) * [Subset by Space-Time](spaceTime) * [Colocalize](matchCruise) * [List of Pre-Colocalized Datasets](datasetsWithAncillary) * [Retrieve Dataset With Pre-Colocalized Data](getDatasetWithAncillary) * [Dynamic Climatology](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_climatology.htmlclimatology) * [Custom SQL Query](query) * [**Data Visulization (selected methods)**](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_data_vizualization.html) * [Histogram](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_histogram.htmlhistogram) * [Time Series](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_time_series.htmltimeseries) * [Regional Map, Contour Plot, 3D Surface Plot](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_rm_cp_3d.htmlrmcp3d) * [Section Map, Section Contour](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_section_map_contour.htmlsectionmapcontour) * [Depth Profile](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_depth_profile.htmldepthprofile) * [Cruise Track](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_vizualization/pycmap_cruise_track.htmlcruisetrackplot) See Docomentation For More:
###Code
from IPython.display import IFrame
IFrame("https://cmap.readthedocs.io/en/latest/user_guide/API_ref/api_ref.html", width=1400, height=1000)
###Output
_____no_output_____
###Markdown
API: Data Retrieval Table of Contents Installationpycmap can be installed using *pip*: `pip install pycmap`In order to use pycmap, you will need to obtain an API key from SimonsCMAP website:https://simonscmap.com. Note:You may install pycmap on cloud-based jupyter notebooks (such as [Colab](https://colab.research.google.com/)) by running the following command in a code-block: `!pip install pycmap`
###Code
# !pip install pycmap -q #uncomment to install pycmap on Colab
import pycmap
pycmap.__version__
###Output
_____no_output_____
###Markdown
Table of Contents [*API( )*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_api.htmlpycmapapi)To retrieve data, we need to create an instance of the system's API and pass the API key. It is not necessary to pass the API key every time you run pycmap, because the key will be stored locally. The API class has other optional parameters to adjust its behavior. All parameters can be updated persistently at any point in the code.Register at https://simonscmap.com and get and API key, if you haven't already.
###Code
api = pycmap.API(token="YOUR_API_KEY")
###Output
_____no_output_____
###Markdown
Table of Contents [*get_catalog()*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_catalog.htmlgetcatalog)Returns a dataframe containing the details of all variables at Simons CMAP database. This method requires no input.
###Code
api.get_catalog()
###Output
_____no_output_____
###Markdown
Table of Contents [*search_catalog(keywords)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_search_catalog.htmlsearchcatalog)Returns a dataframe containing a subset of Simons CMAP catalog of variables. All variables at Simons CMAP catalog are annotated with a collection of semantically related keywords. This method takes the passed keywords and returns all of the variables annotated with similar keywords. The passed keywords should be separated by blank space. The search result is not sensitive to the order of keywords and is not case sensitive. The passed keywords can provide any 'hint' associated with the target variables. Below are a few examples: * the exact variable name (e.g. NO3), or its linguistic term (Nitrate) * methodology (model, satellite ...), instrument (CTD, seaflow), or disciplines (physics, biology ...) * the cruise official name (e.g. KOK1606), or unofficial cruise name (Falkor) * the name of data producer (e.g Penny Chisholm) or institution name (MIT) If you searched for a variable with semantically-related-keywords and did not get the correct results, please let us know. We can update the keywords at any point. Example:Returns a list of Nitrite measurements during the Falkor cruise, if exists.
###Code
api.search_catalog('nitrate model')
###Output
_____no_output_____
###Markdown
Table of Contents [*cruises()*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_list_cruises.htmllist-cruises)Returns a dataframe containing the list of cruises registered at Simons CMAP.
###Code
api.cruises()
###Output
_____no_output_____
###Markdown
Table of Contents [*cruise_trajectory(cruiseName)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_cruise_trajectory.htmlcruise-traj)Returns a dataframe containing the trajectory of the specified cruise. > **Parameters:** >> **cruiseName: string**>> The official cruise name. If applicable, you may also use cruise “nickname” (‘Diel’, ‘Gradients_1’ …). A full list of cruise names can be retrieved using the `cruises()` method.>> >**Returns:** >> Pandas dataframe.
###Code
api.cruise_trajectory('KM1712')
from pycmap.viz import plot_cruise_track
plot_cruise_track(['KM1712'])
###Output
KM1712 cruise track retrieved.
[0m
###Markdown
Table of Contents [*cruise_variables(cruiseName)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_cruise_variables.htmlcruisevars)Returns a dataframe containing all registered variables (at Simons CMAP) during the specified cruise.> **Parameters:** >> **cruiseName: string**>> The official cruise name. If applicable, you may also use cruise “nickname” (‘Diel’, ‘Gradients_1’ …). A full list of cruise names can be retrieved using the `cruises()` method.>> >**Returns:** >> Pandas dataframe. Example:Returns a list of measured variables during the KM1712 cruise.
###Code
api.cruise_variables('KM1712')
###Output
_____no_output_____
###Markdown
Table of Contents [*get_dataset(tableName)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_retrieve_dataset.htmlretrieve-dataset)Returns the entire dataset. Note that this method does not return the dataset metadata. Use the Metadata method to get the dataset metadata.> **Parameters:** >> **tableName: string**>> Table name (each dataset is stored in a table). A full list of table names can be found in [catalog](https://simonscmap.com/catalog).>> >**Returns:** >> Pandas dataframe.
###Code
api.get_dataset("tblAMT13_Chisholm")
###Output
_____no_output_____
###Markdown
Table of Contents [*space_time(table, variable, dt1, dt2, lat1, lat2, lon1, lon2, depth1, depth2)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_subset_ST.htmlsubset-st)Returns a subset of data according to the specified space-time constraints (dt1, dt2, lat1, lat2, lon1, lon2, depth1, depth2).The results are ordered by time, lat, lon, and depth (if exists), respectively. > **Parameters:** >> **table: string**>> Table name (each dataset is stored in a table). A full list of table names can be found in [catalog](https://simonscmap.com/catalog).>> >> **variable: string**>> Variable short name which directly corresponds to a field name in the table. A subset of this variable is returned by this method according to the spatio-temporal cut parameters (below). Pass **'\*'** wild card to retrieve all fields in a table. A full list of variable short names can be found in [catalog](https://simonscmap.com/catalog).>> >> **dt1: string**>> Start date or datetime. This parameter sets the lower bound of the temporal cut. Example values: '2016-05-25' or '2017-12-10 17:25:00'>> >> **dt2: string**>> End date or datetime. This parameter sets the upper bound of the temporal cut. >> >> **lat1: float**>> Start latitude [degree N]. This parameter sets the lower bound of the meridional cut. Note latitude ranges from -90° to 90°.>> >> **lat2: float**>> End latitude [degree N]. This parameter sets the upper bound of the meridional cut. Note latitude ranges from -90° to 90°.>> >> **lon1: float**>> Start longitude [degree E]. This parameter sets the lower bound of the zonal cut. Note longitue ranges from -180° to 180°.>> >> **lon2: float**>> End longitude [degree E]. This parameter sets the upper bound of the zonal cut. Note longitue ranges from -180° to 180°.>> >> **depth1: float**>> Start depth [m]. This parameter sets the lower bound of the vertical cut. Note depth is a positive number (it is 0 at surface and grows towards ocean floor).>> >> **depth2: float**>> End depth [m]. This parameter sets the upper bound of the vertical cut. Note depth is a positive number (it is 0 at surface and grows towards ocean floor).>**Returns:** >> Pandas dataframe. Example:This example retrieves a subset of in-situ salinity measurements by [Argo floats](https://cmap.readthedocs.io/en/latest/catalog/datasets/Argo.htmlargo).
###Code
api.space_time(
table='tblArgoMerge_REP',
variable='argo_merge_salinity_adj',
dt1='2015-05-01',
dt2='2015-05-30',
lat1=28,
lat2=38,
lon1=-71,
lon2=-50,
depth1=0,
depth2=100
)
###Output
_____no_output_____
###Markdown
Table of Contents [*along_track(cruise, targetTables, targetVars, depth1, depth2, temporalTolerance, latTolerance, lonTolerance, depthTolerance)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_match_cruise_track_datasets.htmlmatchcruise)This method colocalizes a cruise trajectory with the specified target variables. The matching results rely on the tolerance parameters because these parameters set the matching boundaries between the cruise trajectory and target datasets. Please note that the number of matching entries for each target variable might vary depending on the temporal and spatial resolutions of the target variable. In principle, if the cruise trajectory is fully covered by the target variable's spatio-temporal range, there should always be matching results if the tolerance parameters are larger than half of their corresponding spatial/temporal resolutions. Please explore the [catalog](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog) to find appropriate target variables to colocalize with the desired cruise. This method returns a dataframe containing the cruise trajectory joined with the target variable(s). > **Parameters:** >> **cruise: string**>> The official cruise name. If applicable, you may also use cruise "nickname" ('Diel', 'Gradients_1' ...). A full list of cruise names can be retrieved using [cruise](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog) method.>> >> **targetTables: list of string**>> Table names of the target datasets to be matched with the cruise trajectory. Notice cruise trajectory can be matched with multiple target datasets. A full list of table names can be found in [catalog](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog).>> >> **targetVars: list of string**>> Variable short names to be matched with the cruise trajectory. A full list of variable short names can be found in [catalog](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/pycmap_catalog.htmlgetcatalog).>> >> **depth1: float**>> Start depth [m]. This parameter sets the lower bound of the depth cut on the traget datasets. 'depth1' and 'depth2' allow matching a cruise trajectory (which is at the surface, hopefully!) with traget varaiables at lower depth. Note depth is a positive number (depth is 0 at surface and grows towards ocean floor).>> >> **depth2: float**>> End depth [m]. This parameter sets the upper bound of the depth cut on the traget datasets. Note depth is a positive number (depth is 0 at surface and grows towards ocean floor).>> >> **temporalTolerance: list of int**>> Temporal tolerance values between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single integer value is given, that would be applied to all target datasets. This parameter is in day units except when the target variable represents monthly climatology data in which case it is in month units. Notice fractional values are not supported in the current version.>> >> **latTolerance: list of float or int**>> Spatial tolerance values in meridional direction [deg] between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single float value is given, that would be applied to all target datasets. 
A "safe" value for this parameter can be slightly larger than the half of the traget variable's spatial resolution.>> >> **lonTolerance: list of float or int**>> Spatial tolerance values in zonal direction [deg] between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single float value is given, that would be applied to all target datasets. A "safe" value for this parameter can be slightly larger than the half of the traget variable's spatial resolution.>> >> **depthTolerance: list of float or int**>> Spatial tolerance values in vertical direction [m] between the cruise trajectory and target datasets. The size and order of values in this list should match those of targetTables. If only a single float value is given, that would be applied to all target datasets. >**Returns:** >> Pandas dataframe. Example:Colocalizes the Gradients_1 cruise with prochloro_abundance and prokaryote_c01_darwin_clim variables from the Seaflow and Darwin (climatology) Data sets, respectively.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pycmap
api = pycmap.API()
df = api.along_track(
cruise='gradients_3',
targetTables=['tblSeaFlow', 'tblDarwin_Nutrient_Climatology'],
targetVars=['abundance_prochloro', 'PO4_darwin_clim'],
depth1=0,
depth2=5,
temporalTolerance=[0, 0],
latTolerance=[0.01, 0.25],
lonTolerance=[0.01, 0.25],
depthTolerance=[5, 5]
)
################# Simple Plot #################
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
c1, c2 = 'firebrick', 'slateblue'
t1, t2 = 'tblSeaFlow', 'tblDarwin_Nutrient_Climatology'
v1, v2 = 'abundance_prochloro', 'PO4_darwin_clim'
ax1.plot(df['lat'], df[v1], 'o', color=c1, markeredgewidth=0, label='SeaFlow', alpha=0.2)
ax1.tick_params(axis='y', labelcolor='r')
ax1.set_ylabel(v1 + api.get_unit(t1, v1), color='r')
ax2.plot(df['lat'], df[v2], 'o', color=c2, markeredgewidth=0, label='Darwin', alpha=0.2)
ax2.tick_params(axis='y', labelcolor='b')
ax2.set_ylabel(v2 + api.get_unit(t2, v2), color='b')
ax1.set_xlabel('Latitude')
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Table of Contents Custom Colocalization:Sample(source, targets, replaceWithMonthlyClimatolog)Samples the targest datasets using the time-location of the source datasetReturns a dataframe containing the original source data and the joined colocalized target variables.> **Parameters:** >> **source: dataframe**>> A dataframe containing the source datasets (must have time-location columns).>> >> **targets: dict**>> A dcitionary containing the target table/variables and tolerance parameters. The items in `tolerances` list are: temporal tolerance [days], meridional tolerance [deg], >> zonal tolerance [deg], and vertical tolerance [m], repectively.>> Below is an example for `targets` parameter:>> targets = {>> "tblSST_AVHRR_OI_NRT": {>> "variables": ["sst"],>> "tolerances": [1, 0.25, 0.25, 5]>> },>> "tblAltimetry_REP": {>> "variables": ["sla", "adt", "ugosa", "vgosa"],>> "tolerances": [1, 0.25, 0.25, 5]>> }>> }>> >> **replaceWithMonthlyClimatolog: boolean**>> If `True`, monthly climatology of the target variables is colocalized when the target dataset's temporal range does not cover the source data. If `False`, only contemporaneous target data are colocalized. >> >**Returns:** >> Pandas dataframe.
###Code
targets = {
"tblSST_AVHRR_OI_NRT": {
"variables": ["sst"],
"tolerances": [0, 0.25, 0.25, 0]
}
}
pycmap.Sample(
source=api.get_dataset("tblAMT13_Chisholm"),
targets=targets,
replaceWithMonthlyClimatolog=True
)
###Output
Gathering metadata ....
Sampling starts.
282 / 283 ... sampling tblSST_AVHRR_OI_NRT278 / 283 ... sampling tblSST_AVHRR_OI_NRT181 / 283 ... sampling tblSST_AVHRR_OI_NRT/ 283 ... sampling tblSST_AVHRR_OI_NRT249 / 283 ... sampling tblSST_AVHRR_OI_NRT
Sampling ends.
###Markdown
Table of Contents [*datasets_with_ancillary()*](https://github.com/simonscmap/pycmap/blob/master/docs/DatasetsWithAncillary.ipynb)Returns a dataframe containing the list of data sets that have been automatically colocalized with ancillary variables (mostly environmental).A growing number of Simons CMAP datasets are being automatically colocalized with a large (100+) number of ancillary parameters derived from satellite and numerical model products.>**Returns:** >> Pandas dataframe.
###Code
api.datasets_with_ancillary()
###Output
_____no_output_____
###Markdown
Table of Contents [*get_dataset_with_ancillary(tableName)*](https://github.com/simonscmap/pycmap/blob/master/docs/RetrieveDatasetWithAncillary.ipynb)Returns the entire dataset joined with colocalized ancillary variables. The ancillary variable names are prefixed with `CMAP_`.Note that this method does not return the dataset metadata. Use the get_dataset_metadata method to get the dataset metadata.> **Parameters:** >> **tableName: string**>> The name of table associated with the dataset. A full list of table names can be found in the [Data Catalog](https://simonscmap.com/catalog).>> >**Returns:** >> Pandas dataframe.
###Code
api.get_dataset_with_ancillary("tblAMT13_Chisholm")
###Output
_____no_output_____
###Markdown
Table of Contents [*query(query)*](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_query.htmlquery)Simons CMAP datasets are hosted in a SQL database and pycmap package provides the user with a number of pre-developed methods to extract and retrieve subsets of the data. The rest of this documentation is dedicated to explore and explain these methods. In addition to the pre-developed methods, we intend to leave the database open to custom scan queries for interested users. This method takes a custom SQL query statement and returns the results in form of a Pandas dataframe. The full list of table names and variable names (fields) can be obtained using the [get_catalog()](https://cmap.readthedocs.io/en/latest/user_guide/API_ref/pycmap_api/data_retrieval/pycmap_catalog.htmlgetcatalog) method. In fact, one may use this very method to retrieve the table and field names: `query('EXEC uspCatalog')`. A Dataset is stored in a table and each table field represents a variable. All data tables have the following fields:* [time] [date or datetime] NOT NULL,* [lat] [float] NOT NULL,* [lon] [float] NOT NULL,* [depth] [float] NOT NULL, Note:Tables which represent a climatological dataset, such as 'tblDarwin_Nutrient_Climatology', will not have a 'time' field. Also, if a table represents a surface dataset, such as satellite products, there would be no 'depth' field. 'depth' is a positive number in meters unit; it is zero at the surface growing towards the ocean's floor. 'lat' and 'lon' are in degrees units, ranging from -90° to 90° and -180° to 180°, respectively.Please keep in mind that some of the datasets are massive in size (10s of TB), avoid queries without WHERE clause (`SELECT * FROM TABLENAME`). Always try to add some constraints on time, lat, lon, and depth fields (see the basic examples below). Moreover, the database hosts a wide range of predefined stored procedures and functions to streamline nearly all CMAP data services. For instance retrieving the catalog information is achieved using a single call of this procedure: *uspCatalog*. These predefined procedures can be called using the pycmap package (see example below). Alternatively, one may use any SQL client to execute these procedures to retrieve and visualize data (examples: [Azure Data Studio](https://docs.microsoft.com/en-us/sql/azure-data-studio/download?view=sql-server-ver15), or [Plotly Falcon](https://plot.ly/free-sql-client-download/)). Using the predefined procedures all CMAP data services are centralized at the database layer which dramatically facilitates the process of developing apps with different programming languages (pycmap, web app, cmap4r, ...). Please note that you can improve the current procedures or add new procedures by contributing at the [CMAP database repository](https://github.com/simonscmap/DB). Below is a selected list of stored procedures and functions, their arguments will be described in more details subsequently:* uspCatalog* uspSpaceTime* uspTimeSeries* uspDepthProfile* uspSectionMap* uspCruises* uspCruiseByName* uspCruiseBounds* uspWeekly* uspMonthly* uspQuarterly* uspAnnual* uspMatch* udfDatasetReferences* udfMetaData_NoRefHappy SQL Injection! Example:A sample stored procedure returning the list of all cruises hosted by Simons CMAP.
###Code
api.query('EXEC uspCruises')
###Output
_____no_output_____
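###Markdown
Example: As mentioned above, the catalog of table and field names can be retrieved through this same method by executing the *uspCatalog* procedure (equivalent to calling get_catalog()).
###Code
# Retrieve the full catalog of tables and variables (same result as api.get_catalog())
api.query('EXEC uspCatalog')
###Output
_____no_output_____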
###Markdown
Example: A sample query returning the time series of sea surface temperature (sst).
###Code
api.query(
'''
SELECT [time], AVG(lat) AS lat, AVG(lon) AS lon, AVG(sst) AS sst FROM tblsst_AVHRR_OI_NRT
WHERE
[time] BETWEEN '2016-06-01' AND '2016-10-01' AND
lat BETWEEN 23 AND 24 AND
lon BETWEEN -160 AND -158
GROUP BY [time]
ORDER BY [time]
'''
)
###Output
_____no_output_____ |
tutorials/streamlit_notebooks/CLASSIFICATION_EN_FAKENEWS.ipynb | ###Markdown
[](https://githubtocolab.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/CLASSIFICATION_EN_FAKENEWS.ipynb) **Detect fake news** 1. Colab Setup
###Code
# Install PySpark and Spark NLP
! pip install -q pyspark==3.1.2 spark-nlp
import pandas as pd
import numpy as np
import json
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from sparknlp.annotator import *
from sparknlp.base import *
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
###Output
_____no_output_____
###Markdown
2. Start Spark Session
###Code
spark = sparknlp.start()
###Output
_____no_output_____
###Markdown
3. Select the DL model
###Code
### Select Model
model_name = 'classifierdl_use_fakenews'
###Output
_____no_output_____
###Markdown
4. Some sample examples
###Code
text_list=[
"""Donald Trump a KGB Spy? 11/02/2016 In today’s video, Christopher Greene of AMTV reports Hillary Clinton campaign accusation that Donald Trump is a KGB spy is about as weak and baseless a claim as a Salem witch hunt or McCarthy era trial. It’s only because Hillary Clinton is losing that she is lobbing conspiracy theory. Citizen Quasar The way I see it, one of two things will happen: 1. Trump will win by a landslide but the election will be stolen via electronic voting, just like I have been predicting for over a decade, and the American People will accept the skewed election results just like they accept the TSA into their crotches. 2. Somebody will bust a cap in Hillary’s @$$ killing her and the election will be postponed. Follow AMTV!""",
"""President Barack Obama said former Secretary of State Hillary Clinton’s use of a personal email server was a mistake, but that U.S. national security hadn’t been endangered. In his first extensive remarks on the controversy that has roiled the Democratic presidential primary, Mr. Obama said on CBS’s “60 Minutes” program that questions about Mrs. Clinton’s email arrangement were legitimate. “It is important for her to answer these questions to the satisfaction of the American public,” Mr. Obama said.""",
"""Abby Martin Exposes What Hillary Clinton Really Represents ‹ › Since 2011, VNN has operated as part of the Veterans Today Network ; a group that operates over 50 plus media, information and service online sites for U.S. Military Veterans. Morning Joe Destroys Corrupt Clinton Foundation (Laughable) “Total Corruption” By VNN on October 28, 2016 'Pay for Play' and 'Quid Pro Quo' 'Shut Down The Foundation' Inside the Clinton’s Foundation and Personal Gains They are bragging that they can shake down foundation clients, for Bill Clinton money… This is sleazy… Joe Scarborough. Follow the money.""",
"""President Barack Obama is ramping up efforts to convince individual House members to grant him fast-track authority to negotiate trade deals, focusing his efforts on a dwindling group of undecided Democratic lawmakers.But Democrats who have already backed the deal publicly said these members need to be convinced they are not trading away their own political futures for a vote on fast-track. Potentially decisive are moderate, pro-growth members of the New Democrat Coalition. Its vice-chair, Rep. Jim Himes (D., Conn.), spoke as recently as Monday to the president, after fielding calls from the White House during last week’s recess as well.""",
"""Most American spend over 9 hours a day using media. Is this making us dumber?? Are you living in a media induced trace? Do you know the truth of the world or do you know what the manipulators want you to know?! Professor Jerry Kroth (Ph. D. Psychology) examines the ties between advertising and factual knowledge. Most people can name every mascot of most companies, but they can not name hardly any historical figures. How did we get here? These are questions that Professor Kroth explains in this well thought out presentation. This talk is based on Dr. Kroth's recent book, "Duped! Delusion, Denial, and the end of the American dream." """,
"""Michael Brown’s parents plan to bring a civil lawsuit for the wrongful death of their son against Darren Wilson and the city of Ferguson, Missouri. The announcment came a day after the Justice Department released its report on the abuses of the city’s police department and said Wilson wouldn’t be charged for violating Brown’s civil rights. Brown family lawyers note that the burden of proof is lower in a civil case than the criminal cases that were considered by both the federal government and a St. Louis County grand jury.""",
"""It’s Going to Change RADICALLY With Silver – HUGE Demand Coming | Cliff High Data mining expert Cliff High says the economy is much worse than most people think, and that bubble is going to pop after Election Day. Inflation is also coming, and that will be very positive for precious metals . High contends, “ Gold and silver are going to rise relative to the falling currencies. Gold and silver in actual purchasing power will also rise. They won’t be saying an ounce of gold bought a good suit 100 years ago and an ounce of gold will buy a good suit now. That’s going to change, and it’s also going to change radically with silver . Also, in our data sets between 2019 and 2024 , silver becomes the metal to have… You need to have silver . 2017 Gold Pandas and 2017 Silver Pandas Are Now Available! Secure Your 2017 Panda Coins Today at SD Bullion!""",
"""Senate Majority Leader Mitch McConnell announced a "Plan B" to halt a nuclear deal that would lift sanctions against Iran.The measure, which Republican aides said likely would be voted on Thursday, would prevent President Obama from lifting the sanctions until Iran releases four jailed Americans and recognizes the right of Israel to exist.McConnell made the announcement as Democrats prepared for a second time to filibuster a resolution of disapproval of the nuclear deal. Soon after McConnell's remarks, 42 Democrats again filibustered the resolution, preventing it from getting the 60 votes needed to advance."My strong preference is for Democrats to simply allow an up-or-down vote on the president's Iran deal." McConnell said. "But if they're determined to make that impossible, then at the very least we should be able to provide some protection to Israel and long-overdue relief to Americans who've languished in Iranian custody for years. Either way, this debate will continue." """,
"""TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! fisher 5 mins ago News Comments Off on TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! Breaking! Breaking! Bad news for Hillary in Florida. Early voting numbers from Florida are showing that Republicans have cast 17,000 more votes than Democrats. *** 6 days before the Election in 2012, Democrats in Florida cast 39,000 more votes than Republicans. *** Today, six days before the election, Republicans have now cast 17,000 more votes than Democrats. Watch Trump in Miami, FL today: """,
"""Sen. Marco Rubio (R-Fla.) is adding a veteran New Hampshire political operative to his team as he continues mulling a possible 2016 presidential bid, the latest sign that he is seriously preparing to launch a campaign later this year.Jim Merrill, who worked for former GOP presidential nominee Mitt Romney and ran his 2008 and 2012 New Hampshire primary campaigns, joined Rubio’s fledgling campaign on Monday, aides to the senator said.Merrill will be joining Rubio’s Reclaim America PAC to focus on Rubio’s New Hampshire and broader Northeast political operations."Marco has always been well received in New Hampshire, and should he run for president, he would be very competitive there," Terry Sullivan, who runs Reclaim America, said in a statement. "Jim certainly knows how to win in New Hampshire and in the Northeast, and will be a great addition to our team at Reclaim America.”News of Merrill’s hire was first reported by The New York Times.""",
]
###Output
_____no_output_____
###Markdown
5. Define Spark NLP pipeline
###Code
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
use = UniversalSentenceEncoder.pretrained(lang="en") \
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")
document_classifier = ClassifierDLModel.pretrained(model_name)\
.setInputCols(['document', 'sentence_embeddings']).setOutputCol("class")
nlpPipeline = Pipeline(stages=[
documentAssembler,
use,
document_classifier
])
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
classifierdl_use_fakenews download started this may take some time.
Approximate size to download 21.4 MB
[OK!]
###Markdown
6. Run the pipeline
###Code
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
df = spark.createDataFrame(pd.DataFrame({"text":text_list}))
result = pipelineModel.transform(df)
###Output
_____no_output_____
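###Markdown
Fitting on an empty dataframe here only initializes the pretrained stages; no training takes place. For quick experiments on individual strings, Spark NLP also provides LightPipeline, which runs a fitted pipeline without constructing a Spark dataframe. Below is a minimal sketch reusing the fitted model above.
###Code
# Optional: annotate a single text directly with LightPipeline (no Spark DataFrame needed)
from sparknlp.base import LightPipeline

light_model = LightPipeline(pipelineModel)
annotations = light_model.annotate(text_list[0])
print(annotations['class'])
###Output
_____no_output_____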
###Markdown
7. Visualize results
###Code
result.select(F.explode(F.arrays_zip('class.result', 'document.result')).alias("cols")) \
.select(F.expr("cols['0']").alias("class"),
F.expr("cols['1']").alias("document")).show(truncate=False)
###Output
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|class|document |
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|FAKE |Donald Trump a KGB Spy? 11/02/2016 In today’s video, Christopher Greene of AMTV reports Hillary Clinton campaign accusation that Donald Trump is a KGB spy is about as weak and baseless a claim as a Salem witch hunt or McCarthy era trial. It’s only because Hillary Clinton is losing that she is lobbing conspiracy theory. Citizen Quasar The way I see it, one of two things will happen: 1. Trump will win by a landslide but the election will be stolen via electronic voting, just like I have been predicting for over a decade, and the American People will accept the skewed election results just like they accept the TSA into their crotches. 2. Somebody will bust a cap in Hillary’s @$$ killing her and the election will be postponed. Follow AMTV! |
|REAL |President Barack Obama said former Secretary of State Hillary Clinton’s use of a personal email server was a mistake, but that U.S. national security hadn’t been endangered. In his first extensive remarks on the controversy that has roiled the Democratic presidential primary, Mr. Obama said on CBS’s “60 Minutes” program that questions about Mrs. Clinton’s email arrangement were legitimate. “It is important for her to answer these questions to the satisfaction of the American public,” Mr. Obama said. |
|FAKE |Abby Martin Exposes What Hillary Clinton Really Represents ‹ › Since 2011, VNN has operated as part of the Veterans Today Network ; a group that operates over 50 plus media, information and service online sites for U.S. Military Veterans. Morning Joe Destroys Corrupt Clinton Foundation (Laughable) “Total Corruption” By VNN on October 28, 2016 'Pay for Play' and 'Quid Pro Quo' 'Shut Down The Foundation' Inside the Clinton’s Foundation and Personal Gains They are bragging that they can shake down foundation clients, for Bill Clinton money… This is sleazy… Joe Scarborough. Follow the money. |
|REAL |President Barack Obama is ramping up efforts to convince individual House members to grant him fast-track authority to negotiate trade deals, focusing his efforts on a dwindling group of undecided Democratic lawmakers.But Democrats who have already backed the deal publicly said these members need to be convinced they are not trading away their own political futures for a vote on fast-track. Potentially decisive are moderate, pro-growth members of the New Democrat Coalition. Its vice-chair, Rep. Jim Himes (D., Conn.), spoke as recently as Monday to the president, after fielding calls from the White House during last week’s recess as well. |
|FAKE |Most American spend over 9 hours a day using media. Is this making us dumber?? Are you living in a media induced trace? Do you know the truth of the world or do you know what the manipulators want you to know?! Professor Jerry Kroth (Ph. D. Psychology) examines the ties between advertising and factual knowledge. Most people can name every mascot of most companies, but they can not name hardly any historical figures. How did we get here? These are questions that Professor Kroth explains in this well thought out presentation. This talk is based on Dr. Kroth's recent book, "Duped! Delusion, Denial, and the end of the American dream." |
|REAL |Michael Brown’s parents plan to bring a civil lawsuit for the wrongful death of their son against Darren Wilson and the city of Ferguson, Missouri. The announcment came a day after the Justice Department released its report on the abuses of the city’s police department and said Wilson wouldn’t be charged for violating Brown’s civil rights. Brown family lawyers note that the burden of proof is lower in a civil case than the criminal cases that were considered by both the federal government and a St. Louis County grand jury. |
|FAKE |It’s Going to Change RADICALLY With Silver – HUGE Demand Coming | Cliff High Data mining expert Cliff High says the economy is much worse than most people think, and that bubble is going to pop after Election Day. Inflation is also coming, and that will be very positive for precious metals . High contends, “ Gold and silver are going to rise relative to the falling currencies. Gold and silver in actual purchasing power will also rise. They won’t be saying an ounce of gold bought a good suit 100 years ago and an ounce of gold will buy a good suit now. That’s going to change, and it’s also going to change radically with silver . Also, in our data sets between 2019 and 2024 , silver becomes the metal to have… You need to have silver . 2017 Gold Pandas and 2017 Silver Pandas Are Now Available! Secure Your 2017 Panda Coins Today at SD Bullion! |
|REAL |Senate Majority Leader Mitch McConnell announced a "Plan B" to halt a nuclear deal that would lift sanctions against Iran.The measure, which Republican aides said likely would be voted on Thursday, would prevent President Obama from lifting the sanctions until Iran releases four jailed Americans and recognizes the right of Israel to exist.McConnell made the announcement as Democrats prepared for a second time to filibuster a resolution of disapproval of the nuclear deal. Soon after McConnell's remarks, 42 Democrats again filibustered the resolution, preventing it from getting the 60 votes needed to advance."My strong preference is for Democrats to simply allow an up-or-down vote on the president's Iran deal." McConnell said. "But if they're determined to make that impossible, then at the very least we should be able to provide some protection to Israel and long-overdue relief to Americans who've languished in Iranian custody for years. Either way, this debate will continue." |
|FAKE |TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! fisher 5 mins ago News Comments Off on TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! Breaking! Breaking! Bad news for Hillary in Florida. Early voting numbers from Florida are showing that Republicans have cast 17,000 more votes than Democrats. *** 6 days before the Election in 2012, Democrats in Florida cast 39,000 more votes than Republicans. *** Today, six days before the election, Republicans have now cast 17,000 more votes than Democrats. Watch Trump in Miami, FL today: |
|REAL |Sen. Marco Rubio (R-Fla.) is adding a veteran New Hampshire political operative to his team as he continues mulling a possible 2016 presidential bid, the latest sign that he is seriously preparing to launch a campaign later this year.Jim Merrill, who worked for former GOP presidential nominee Mitt Romney and ran his 2008 and 2012 New Hampshire primary campaigns, joined Rubio’s fledgling campaign on Monday, aides to the senator said.Merrill will be joining Rubio’s Reclaim America PAC to focus on Rubio’s New Hampshire and broader Northeast political operations."Marco has always been well received in New Hampshire, and should he run for president, he would be very competitive there," Terry Sullivan, who runs Reclaim America, said in a statement. "Jim certainly knows how to win in New Hampshire and in the Northeast, and will be a great addition to our team at Reclaim America.”News of Merrill’s hire was first reported by The New York Times. |
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
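###Markdown
The ClassifierDL annotator also stores per-class confidence scores in the annotation metadata. The sketch below shows one way to surface them next to the predicted label, assuming the standard ClassifierDL metadata layout.
###Code
# Show each predicted label together with the raw metadata (includes per-class confidence scores)
result.select(F.explode('class').alias('c')) \
      .select(F.expr("c.result").alias("class"),
              F.expr("c.metadata").alias("confidence")).show(truncate=False)
###Output
_____no_output_____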
###Markdown
[](https://githubtocolab.com/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/CLASSIFICATION_EN_FAKENEWS.ipynb) **Detect fake news** 1. Colab Setup
###Code
# Install Java, PySpark, and Spark NLP via the John Snow Labs Colab setup script
!wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
# By default the script installs the latest versions; specific versions can be pinned:
#   -p sets the pyspark version, -s sets the spark-nlp version
# !bash colab.sh -p 3.1.1 -s 3.0.1
import pandas as pd
import numpy as np
import json
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from sparknlp.annotator import *
from sparknlp.base import *
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
###Output
_____no_output_____
###Markdown
2. Start Spark Session
###Code
spark = sparknlp.start()
###Output
_____no_output_____
###Markdown
3. Select the DL model
###Code
### Select Model
model_name = 'classifierdl_use_fakenews'
###Output
_____no_output_____
###Markdown
4. Some sample examples
###Code
text_list=[
"""Donald Trump a KGB Spy? 11/02/2016 In today’s video, Christopher Greene of AMTV reports Hillary Clinton campaign accusation that Donald Trump is a KGB spy is about as weak and baseless a claim as a Salem witch hunt or McCarthy era trial. It’s only because Hillary Clinton is losing that she is lobbing conspiracy theory. Citizen Quasar The way I see it, one of two things will happen: 1. Trump will win by a landslide but the election will be stolen via electronic voting, just like I have been predicting for over a decade, and the American People will accept the skewed election results just like they accept the TSA into their crotches. 2. Somebody will bust a cap in Hillary’s @$$ killing her and the election will be postponed. Follow AMTV!""",
"""President Barack Obama said former Secretary of State Hillary Clinton’s use of a personal email server was a mistake, but that U.S. national security hadn’t been endangered. In his first extensive remarks on the controversy that has roiled the Democratic presidential primary, Mr. Obama said on CBS’s “60 Minutes” program that questions about Mrs. Clinton’s email arrangement were legitimate. “It is important for her to answer these questions to the satisfaction of the American public,” Mr. Obama said.""",
"""Abby Martin Exposes What Hillary Clinton Really Represents ‹ › Since 2011, VNN has operated as part of the Veterans Today Network ; a group that operates over 50 plus media, information and service online sites for U.S. Military Veterans. Morning Joe Destroys Corrupt Clinton Foundation (Laughable) “Total Corruption” By VNN on October 28, 2016 'Pay for Play' and 'Quid Pro Quo' 'Shut Down The Foundation' Inside the Clinton’s Foundation and Personal Gains They are bragging that they can shake down foundation clients, for Bill Clinton money… This is sleazy… Joe Scarborough. Follow the money.""",
"""President Barack Obama is ramping up efforts to convince individual House members to grant him fast-track authority to negotiate trade deals, focusing his efforts on a dwindling group of undecided Democratic lawmakers.But Democrats who have already backed the deal publicly said these members need to be convinced they are not trading away their own political futures for a vote on fast-track. Potentially decisive are moderate, pro-growth members of the New Democrat Coalition. Its vice-chair, Rep. Jim Himes (D., Conn.), spoke as recently as Monday to the president, after fielding calls from the White House during last week’s recess as well.""",
"""Most American spend over 9 hours a day using media. Is this making us dumber?? Are you living in a media induced trace? Do you know the truth of the world or do you know what the manipulators want you to know?! Professor Jerry Kroth (Ph. D. Psychology) examines the ties between advertising and factual knowledge. Most people can name every mascot of most companies, but they can not name hardly any historical figures. How did we get here? These are questions that Professor Kroth explains in this well thought out presentation. This talk is based on Dr. Kroth's recent book, "Duped! Delusion, Denial, and the end of the American dream." """,
"""Michael Brown’s parents plan to bring a civil lawsuit for the wrongful death of their son against Darren Wilson and the city of Ferguson, Missouri. The announcment came a day after the Justice Department released its report on the abuses of the city’s police department and said Wilson wouldn’t be charged for violating Brown’s civil rights. Brown family lawyers note that the burden of proof is lower in a civil case than the criminal cases that were considered by both the federal government and a St. Louis County grand jury.""",
"""It’s Going to Change RADICALLY With Silver – HUGE Demand Coming | Cliff High Data mining expert Cliff High says the economy is much worse than most people think, and that bubble is going to pop after Election Day. Inflation is also coming, and that will be very positive for precious metals . High contends, “ Gold and silver are going to rise relative to the falling currencies. Gold and silver in actual purchasing power will also rise. They won’t be saying an ounce of gold bought a good suit 100 years ago and an ounce of gold will buy a good suit now. That’s going to change, and it’s also going to change radically with silver . Also, in our data sets between 2019 and 2024 , silver becomes the metal to have… You need to have silver . 2017 Gold Pandas and 2017 Silver Pandas Are Now Available! Secure Your 2017 Panda Coins Today at SD Bullion!""",
"""Senate Majority Leader Mitch McConnell announced a "Plan B" to halt a nuclear deal that would lift sanctions against Iran.The measure, which Republican aides said likely would be voted on Thursday, would prevent President Obama from lifting the sanctions until Iran releases four jailed Americans and recognizes the right of Israel to exist.McConnell made the announcement as Democrats prepared for a second time to filibuster a resolution of disapproval of the nuclear deal. Soon after McConnell's remarks, 42 Democrats again filibustered the resolution, preventing it from getting the 60 votes needed to advance."My strong preference is for Democrats to simply allow an up-or-down vote on the president's Iran deal." McConnell said. "But if they're determined to make that impossible, then at the very least we should be able to provide some protection to Israel and long-overdue relief to Americans who've languished in Iranian custody for years. Either way, this debate will continue." """,
"""TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! fisher 5 mins ago News Comments Off on TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! Breaking! Breaking! Bad news for Hillary in Florida. Early voting numbers from Florida are showing that Republicans have cast 17,000 more votes than Democrats. *** 6 days before the Election in 2012, Democrats in Florida cast 39,000 more votes than Republicans. *** Today, six days before the election, Republicans have now cast 17,000 more votes than Democrats. Watch Trump in Miami, FL today: """,
"""Sen. Marco Rubio (R-Fla.) is adding a veteran New Hampshire political operative to his team as he continues mulling a possible 2016 presidential bid, the latest sign that he is seriously preparing to launch a campaign later this year.Jim Merrill, who worked for former GOP presidential nominee Mitt Romney and ran his 2008 and 2012 New Hampshire primary campaigns, joined Rubio’s fledgling campaign on Monday, aides to the senator said.Merrill will be joining Rubio’s Reclaim America PAC to focus on Rubio’s New Hampshire and broader Northeast political operations."Marco has always been well received in New Hampshire, and should he run for president, he would be very competitive there," Terry Sullivan, who runs Reclaim America, said in a statement. "Jim certainly knows how to win in New Hampshire and in the Northeast, and will be a great addition to our team at Reclaim America.”News of Merrill’s hire was first reported by The New York Times.""",
]
###Output
_____no_output_____
###Markdown
5. Define Spark NLP pipeline
###Code
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
use = UniversalSentenceEncoder.pretrained(lang="en") \
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")
document_classifier = ClassifierDLModel.pretrained(model_name)\
.setInputCols(['document', 'sentence_embeddings']).setOutputCol("class")
nlpPipeline = Pipeline(stages=[
documentAssembler,
use,
document_classifier
])
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
classifierdl_use_fakenews download started this may take some time.
Approximate size to download 21.4 MB
[OK!]
###Markdown
6. Run the pipeline
###Code
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
df = spark.createDataFrame(pd.DataFrame({"text":text_list}))
result = pipelineModel.transform(df)
###Output
_____no_output_____
###Markdown
7. Visualize results
###Code
result.select(F.explode(F.arrays_zip('class.result', 'document.result')).alias("cols")) \
.select(F.expr("cols['0']").alias("class"),
F.expr("cols['1']").alias("document")).show(truncate=False)
###Output
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|class|document |
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|FAKE |Donald Trump a KGB Spy? 11/02/2016 In today’s video, Christopher Greene of AMTV reports Hillary Clinton campaign accusation that Donald Trump is a KGB spy is about as weak and baseless a claim as a Salem witch hunt or McCarthy era trial. It’s only because Hillary Clinton is losing that she is lobbing conspiracy theory. Citizen Quasar The way I see it, one of two things will happen: 1. Trump will win by a landslide but the election will be stolen via electronic voting, just like I have been predicting for over a decade, and the American People will accept the skewed election results just like they accept the TSA into their crotches. 2. Somebody will bust a cap in Hillary’s @$$ killing her and the election will be postponed. Follow AMTV! |
|REAL |President Barack Obama said former Secretary of State Hillary Clinton’s use of a personal email server was a mistake, but that U.S. national security hadn’t been endangered. In his first extensive remarks on the controversy that has roiled the Democratic presidential primary, Mr. Obama said on CBS’s “60 Minutes” program that questions about Mrs. Clinton’s email arrangement were legitimate. “It is important for her to answer these questions to the satisfaction of the American public,” Mr. Obama said. |
|FAKE |Abby Martin Exposes What Hillary Clinton Really Represents ‹ › Since 2011, VNN has operated as part of the Veterans Today Network ; a group that operates over 50 plus media, information and service online sites for U.S. Military Veterans. Morning Joe Destroys Corrupt Clinton Foundation (Laughable) “Total Corruption” By VNN on October 28, 2016 'Pay for Play' and 'Quid Pro Quo' 'Shut Down The Foundation' Inside the Clinton’s Foundation and Personal Gains They are bragging that they can shake down foundation clients, for Bill Clinton money… This is sleazy… Joe Scarborough. Follow the money. |
|REAL |President Barack Obama is ramping up efforts to convince individual House members to grant him fast-track authority to negotiate trade deals, focusing his efforts on a dwindling group of undecided Democratic lawmakers.But Democrats who have already backed the deal publicly said these members need to be convinced they are not trading away their own political futures for a vote on fast-track. Potentially decisive are moderate, pro-growth members of the New Democrat Coalition. Its vice-chair, Rep. Jim Himes (D., Conn.), spoke as recently as Monday to the president, after fielding calls from the White House during last week’s recess as well. |
|FAKE |Most American spend over 9 hours a day using media. Is this making us dumber?? Are you living in a media induced trace? Do you know the truth of the world or do you know what the manipulators want you to know?! Professor Jerry Kroth (Ph. D. Psychology) examines the ties between advertising and factual knowledge. Most people can name every mascot of most companies, but they can not name hardly any historical figures. How did we get here? These are questions that Professor Kroth explains in this well thought out presentation. This talk is based on Dr. Kroth's recent book, "Duped! Delusion, Denial, and the end of the American dream." |
|REAL |Michael Brown’s parents plan to bring a civil lawsuit for the wrongful death of their son against Darren Wilson and the city of Ferguson, Missouri. The announcment came a day after the Justice Department released its report on the abuses of the city’s police department and said Wilson wouldn’t be charged for violating Brown’s civil rights. Brown family lawyers note that the burden of proof is lower in a civil case than the criminal cases that were considered by both the federal government and a St. Louis County grand jury. |
|FAKE |It’s Going to Change RADICALLY With Silver – HUGE Demand Coming | Cliff High Data mining expert Cliff High says the economy is much worse than most people think, and that bubble is going to pop after Election Day. Inflation is also coming, and that will be very positive for precious metals . High contends, “ Gold and silver are going to rise relative to the falling currencies. Gold and silver in actual purchasing power will also rise. They won’t be saying an ounce of gold bought a good suit 100 years ago and an ounce of gold will buy a good suit now. That’s going to change, and it’s also going to change radically with silver . Also, in our data sets between 2019 and 2024 , silver becomes the metal to have… You need to have silver . 2017 Gold Pandas and 2017 Silver Pandas Are Now Available! Secure Your 2017 Panda Coins Today at SD Bullion! |
|REAL |Senate Majority Leader Mitch McConnell announced a "Plan B" to halt a nuclear deal that would lift sanctions against Iran.The measure, which Republican aides said likely would be voted on Thursday, would prevent President Obama from lifting the sanctions until Iran releases four jailed Americans and recognizes the right of Israel to exist.McConnell made the announcement as Democrats prepared for a second time to filibuster a resolution of disapproval of the nuclear deal. Soon after McConnell's remarks, 42 Democrats again filibustered the resolution, preventing it from getting the 60 votes needed to advance."My strong preference is for Democrats to simply allow an up-or-down vote on the president's Iran deal." McConnell said. "But if they're determined to make that impossible, then at the very least we should be able to provide some protection to Israel and long-overdue relief to Americans who've languished in Iranian custody for years. Either way, this debate will continue." |
|FAKE |TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! fisher 5 mins ago News Comments Off on TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! Breaking! Breaking! Bad news for Hillary in Florida. Early voting numbers from Florida are showing that Republicans have cast 17,000 more votes than Democrats. *** 6 days before the Election in 2012, Democrats in Florida cast 39,000 more votes than Republicans. *** Today, six days before the election, Republicans have now cast 17,000 more votes than Democrats. Watch Trump in Miami, FL today: |
|REAL |Sen. Marco Rubio (R-Fla.) is adding a veteran New Hampshire political operative to his team as he continues mulling a possible 2016 presidential bid, the latest sign that he is seriously preparing to launch a campaign later this year.Jim Merrill, who worked for former GOP presidential nominee Mitt Romney and ran his 2008 and 2012 New Hampshire primary campaigns, joined Rubio’s fledgling campaign on Monday, aides to the senator said.Merrill will be joining Rubio’s Reclaim America PAC to focus on Rubio’s New Hampshire and broader Northeast political operations."Marco has always been well received in New Hampshire, and should he run for president, he would be very competitive there," Terry Sullivan, who runs Reclaim America, said in a statement. "Jim certainly knows how to win in New Hampshire and in the Northeast, and will be a great addition to our team at Reclaim America.”News of Merrill’s hire was first reported by The New York Times. |
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
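###Markdown
As a quick sanity check on the output above, the predictions can be aggregated to show how the sample texts split between the FAKE and REAL classes:
###Code
# Count how many of the sample texts fall into each predicted class
result.select(F.explode('class.result').alias('prediction')) \
      .groupBy('prediction').count().show()
###Output
_____no_output_____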
###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/CLASSIFICATION_EN_FAKENEWS.ipynb) **Detect fake news** 1. Colab Setup
###Code
# Install Java 8 (required by Spark)
!apt-get update -qq
!apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
!java -version
# Install PySpark
!pip install --ignore-installed -q pyspark==2.4.4
# Install Spark NLP
!pip install --ignore-installed spark-nlp
import pandas as pd
import numpy as np
import os
# Point Spark to the Java 8 installation and expose it on PATH
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
import json
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from sparknlp.annotator import *
from sparknlp.base import *
import sparknlp
from sparknlp.pretrained import PretrainedPipeline
###Output
_____no_output_____
###Markdown
2. Start Spark Session
###Code
spark = sparknlp.start()
###Output
_____no_output_____
###Markdown
3. Select the DL model
###Code
### Select Model
model_name = 'classifierdl_use_fakenews'
###Output
_____no_output_____
###Markdown
4. Some sample examples
###Code
text_list=[
"""Donald Trump a KGB Spy? 11/02/2016 In today’s video, Christopher Greene of AMTV reports Hillary Clinton campaign accusation that Donald Trump is a KGB spy is about as weak and baseless a claim as a Salem witch hunt or McCarthy era trial. It’s only because Hillary Clinton is losing that she is lobbing conspiracy theory. Citizen Quasar The way I see it, one of two things will happen: 1. Trump will win by a landslide but the election will be stolen via electronic voting, just like I have been predicting for over a decade, and the American People will accept the skewed election results just like they accept the TSA into their crotches. 2. Somebody will bust a cap in Hillary’s @$$ killing her and the election will be postponed. Follow AMTV!""",
"""President Barack Obama said former Secretary of State Hillary Clinton’s use of a personal email server was a mistake, but that U.S. national security hadn’t been endangered. In his first extensive remarks on the controversy that has roiled the Democratic presidential primary, Mr. Obama said on CBS’s “60 Minutes” program that questions about Mrs. Clinton’s email arrangement were legitimate. “It is important for her to answer these questions to the satisfaction of the American public,” Mr. Obama said.""",
"""Abby Martin Exposes What Hillary Clinton Really Represents ‹ › Since 2011, VNN has operated as part of the Veterans Today Network ; a group that operates over 50 plus media, information and service online sites for U.S. Military Veterans. Morning Joe Destroys Corrupt Clinton Foundation (Laughable) “Total Corruption” By VNN on October 28, 2016 'Pay for Play' and 'Quid Pro Quo' 'Shut Down The Foundation' Inside the Clinton’s Foundation and Personal Gains They are bragging that they can shake down foundation clients, for Bill Clinton money… This is sleazy… Joe Scarborough. Follow the money.""",
"""President Barack Obama is ramping up efforts to convince individual House members to grant him fast-track authority to negotiate trade deals, focusing his efforts on a dwindling group of undecided Democratic lawmakers.But Democrats who have already backed the deal publicly said these members need to be convinced they are not trading away their own political futures for a vote on fast-track. Potentially decisive are moderate, pro-growth members of the New Democrat Coalition. Its vice-chair, Rep. Jim Himes (D., Conn.), spoke as recently as Monday to the president, after fielding calls from the White House during last week’s recess as well.""",
"""Most American spend over 9 hours a day using media. Is this making us dumber?? Are you living in a media induced trace? Do you know the truth of the world or do you know what the manipulators want you to know?! Professor Jerry Kroth (Ph. D. Psychology) examines the ties between advertising and factual knowledge. Most people can name every mascot of most companies, but they can not name hardly any historical figures. How did we get here? These are questions that Professor Kroth explains in this well thought out presentation. This talk is based on Dr. Kroth's recent book, "Duped! Delusion, Denial, and the end of the American dream." """,
"""Michael Brown’s parents plan to bring a civil lawsuit for the wrongful death of their son against Darren Wilson and the city of Ferguson, Missouri. The announcment came a day after the Justice Department released its report on the abuses of the city’s police department and said Wilson wouldn’t be charged for violating Brown’s civil rights. Brown family lawyers note that the burden of proof is lower in a civil case than the criminal cases that were considered by both the federal government and a St. Louis County grand jury.""",
"""It’s Going to Change RADICALLY With Silver – HUGE Demand Coming | Cliff High Data mining expert Cliff High says the economy is much worse than most people think, and that bubble is going to pop after Election Day. Inflation is also coming, and that will be very positive for precious metals . High contends, “ Gold and silver are going to rise relative to the falling currencies. Gold and silver in actual purchasing power will also rise. They won’t be saying an ounce of gold bought a good suit 100 years ago and an ounce of gold will buy a good suit now. That’s going to change, and it’s also going to change radically with silver . Also, in our data sets between 2019 and 2024 , silver becomes the metal to have… You need to have silver . 2017 Gold Pandas and 2017 Silver Pandas Are Now Available! Secure Your 2017 Panda Coins Today at SD Bullion!""",
"""Senate Majority Leader Mitch McConnell announced a "Plan B" to halt a nuclear deal that would lift sanctions against Iran.The measure, which Republican aides said likely would be voted on Thursday, would prevent President Obama from lifting the sanctions until Iran releases four jailed Americans and recognizes the right of Israel to exist.McConnell made the announcement as Democrats prepared for a second time to filibuster a resolution of disapproval of the nuclear deal. Soon after McConnell's remarks, 42 Democrats again filibustered the resolution, preventing it from getting the 60 votes needed to advance."My strong preference is for Democrats to simply allow an up-or-down vote on the president's Iran deal." McConnell said. "But if they're determined to make that impossible, then at the very least we should be able to provide some protection to Israel and long-overdue relief to Americans who've languished in Iranian custody for years. Either way, this debate will continue." """,
"""TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! fisher 5 mins ago News Comments Off on TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! Breaking! Breaking! Bad news for Hillary in Florida. Early voting numbers from Florida are showing that Republicans have cast 17,000 more votes than Democrats. *** 6 days before the Election in 2012, Democrats in Florida cast 39,000 more votes than Republicans. *** Today, six days before the election, Republicans have now cast 17,000 more votes than Democrats. Watch Trump in Miami, FL today: """,
"""Sen. Marco Rubio (R-Fla.) is adding a veteran New Hampshire political operative to his team as he continues mulling a possible 2016 presidential bid, the latest sign that he is seriously preparing to launch a campaign later this year.Jim Merrill, who worked for former GOP presidential nominee Mitt Romney and ran his 2008 and 2012 New Hampshire primary campaigns, joined Rubio’s fledgling campaign on Monday, aides to the senator said.Merrill will be joining Rubio’s Reclaim America PAC to focus on Rubio’s New Hampshire and broader Northeast political operations."Marco has always been well received in New Hampshire, and should he run for president, he would be very competitive there," Terry Sullivan, who runs Reclaim America, said in a statement. "Jim certainly knows how to win in New Hampshire and in the Northeast, and will be a great addition to our team at Reclaim America.”News of Merrill’s hire was first reported by The New York Times.""",
]
###Output
_____no_output_____
###Markdown
5. Define Spark NLP pipeline
###Code
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
use = UniversalSentenceEncoder.pretrained(lang="en") \
.setInputCols(["document"])\
.setOutputCol("sentence_embeddings")
document_classifier = ClassifierDLModel.pretrained(model_name)\
.setInputCols(['document', 'sentence_embeddings']).setOutputCol("class")
nlpPipeline = Pipeline(stages=[
documentAssembler,
use,
document_classifier
])
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
classifierdl_use_fakenews download started this may take some time.
Approximate size to download 21.4 MB
[OK!]
###Markdown
6. Run the pipeline
###Code
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
df = spark.createDataFrame(pd.DataFrame({"text":text_list}))
result = pipelineModel.transform(df)
###Output
_____no_output_____
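###Markdown
Before extracting fields in the next step, it can help to inspect the nested annotation structure the pipeline produces:
###Code
# Inspect the nested annotation schema of the transformed DataFrame
result.printSchema()
###Output
_____no_output_____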
###Markdown
7. Visualize results
###Code
result.select(F.explode(F.arrays_zip('class.result', 'document.result')).alias("cols")) \
.select(F.expr("cols['0']").alias("class"),
F.expr("cols['1']").alias("document")).show(truncate=False)
###Output
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|class|document |
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|FAKE |Donald Trump a KGB Spy? 11/02/2016 In today’s video, Christopher Greene of AMTV reports Hillary Clinton campaign accusation that Donald Trump is a KGB spy is about as weak and baseless a claim as a Salem witch hunt or McCarthy era trial. It’s only because Hillary Clinton is losing that she is lobbing conspiracy theory. Citizen Quasar The way I see it, one of two things will happen: 1. Trump will win by a landslide but the election will be stolen via electronic voting, just like I have been predicting for over a decade, and the American People will accept the skewed election results just like they accept the TSA into their crotches. 2. Somebody will bust a cap in Hillary’s @$$ killing her and the election will be postponed. Follow AMTV! |
|REAL |President Barack Obama said former Secretary of State Hillary Clinton’s use of a personal email server was a mistake, but that U.S. national security hadn’t been endangered. In his first extensive remarks on the controversy that has roiled the Democratic presidential primary, Mr. Obama said on CBS’s “60 Minutes” program that questions about Mrs. Clinton’s email arrangement were legitimate. “It is important for her to answer these questions to the satisfaction of the American public,” Mr. Obama said. |
|FAKE |Abby Martin Exposes What Hillary Clinton Really Represents ‹ › Since 2011, VNN has operated as part of the Veterans Today Network ; a group that operates over 50 plus media, information and service online sites for U.S. Military Veterans. Morning Joe Destroys Corrupt Clinton Foundation (Laughable) “Total Corruption” By VNN on October 28, 2016 'Pay for Play' and 'Quid Pro Quo' 'Shut Down The Foundation' Inside the Clinton’s Foundation and Personal Gains They are bragging that they can shake down foundation clients, for Bill Clinton money… This is sleazy… Joe Scarborough. Follow the money. |
|REAL |President Barack Obama is ramping up efforts to convince individual House members to grant him fast-track authority to negotiate trade deals, focusing his efforts on a dwindling group of undecided Democratic lawmakers.But Democrats who have already backed the deal publicly said these members need to be convinced they are not trading away their own political futures for a vote on fast-track. Potentially decisive are moderate, pro-growth members of the New Democrat Coalition. Its vice-chair, Rep. Jim Himes (D., Conn.), spoke as recently as Monday to the president, after fielding calls from the White House during last week’s recess as well. |
|FAKE |Most American spend over 9 hours a day using media. Is this making us dumber?? Are you living in a media induced trace? Do you know the truth of the world or do you know what the manipulators want you to know?! Professor Jerry Kroth (Ph. D. Psychology) examines the ties between advertising and factual knowledge. Most people can name every mascot of most companies, but they can not name hardly any historical figures. How did we get here? These are questions that Professor Kroth explains in this well thought out presentation. This talk is based on Dr. Kroth's recent book, "Duped! Delusion, Denial, and the end of the American dream." |
|REAL |Michael Brown’s parents plan to bring a civil lawsuit for the wrongful death of their son against Darren Wilson and the city of Ferguson, Missouri. The announcment came a day after the Justice Department released its report on the abuses of the city’s police department and said Wilson wouldn’t be charged for violating Brown’s civil rights. Brown family lawyers note that the burden of proof is lower in a civil case than the criminal cases that were considered by both the federal government and a St. Louis County grand jury. |
|FAKE |It’s Going to Change RADICALLY With Silver – HUGE Demand Coming | Cliff High Data mining expert Cliff High says the economy is much worse than most people think, and that bubble is going to pop after Election Day. Inflation is also coming, and that will be very positive for precious metals . High contends, “ Gold and silver are going to rise relative to the falling currencies. Gold and silver in actual purchasing power will also rise. They won’t be saying an ounce of gold bought a good suit 100 years ago and an ounce of gold will buy a good suit now. That’s going to change, and it’s also going to change radically with silver . Also, in our data sets between 2019 and 2024 , silver becomes the metal to have… You need to have silver . 2017 Gold Pandas and 2017 Silver Pandas Are Now Available! Secure Your 2017 Panda Coins Today at SD Bullion! |
|REAL |Senate Majority Leader Mitch McConnell announced a "Plan B" to halt a nuclear deal that would lift sanctions against Iran.The measure, which Republican aides said likely would be voted on Thursday, would prevent President Obama from lifting the sanctions until Iran releases four jailed Americans and recognizes the right of Israel to exist.McConnell made the announcement as Democrats prepared for a second time to filibuster a resolution of disapproval of the nuclear deal. Soon after McConnell's remarks, 42 Democrats again filibustered the resolution, preventing it from getting the 60 votes needed to advance."My strong preference is for Democrats to simply allow an up-or-down vote on the president's Iran deal." McConnell said. "But if they're determined to make that impossible, then at the very least we should be able to provide some protection to Israel and long-overdue relief to Americans who've languished in Iranian custody for years. Either way, this debate will continue." |
|FAKE |TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! fisher 5 mins ago News Comments Off on TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! TRUMP TSUNAMI INCOMING: What Trump Did In Florida Today Will Make Him President! Breaking! Breaking! Bad news for Hillary in Florida. Early voting numbers from Florida are showing that Republicans have cast 17,000 more votes than Democrats. *** 6 days before the Election in 2012, Democrats in Florida cast 39,000 more votes than Republicans. *** Today, six days before the election, Republicans have now cast 17,000 more votes than Democrats. Watch Trump in Miami, FL today: |
|REAL |Sen. Marco Rubio (R-Fla.) is adding a veteran New Hampshire political operative to his team as he continues mulling a possible 2016 presidential bid, the latest sign that he is seriously preparing to launch a campaign later this year.Jim Merrill, who worked for former GOP presidential nominee Mitt Romney and ran his 2008 and 2012 New Hampshire primary campaigns, joined Rubio’s fledgling campaign on Monday, aides to the senator said.Merrill will be joining Rubio’s Reclaim America PAC to focus on Rubio’s New Hampshire and broader Northeast political operations."Marco has always been well received in New Hampshire, and should he run for president, he would be very competitive there," Terry Sullivan, who runs Reclaim America, said in a statement. "Jim certainly knows how to win in New Hampshire and in the Northeast, and will be a great addition to our team at Reclaim America.”News of Merrill’s hire was first reported by The New York Times. |
+-----+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
Week 1/Battle_of_the_Neighborhoods.ipynb | ###Markdown
Battle of the Neighborhoods **This notebook was created for the [Applied Data Science Capstone](https://www.coursera.org/learn/applied-data-science-capstone) course as part of the [IBM Data Science Professional Certificate](https://www.coursera.org/professional-certificates/ibm-data-science) Specialization.**
###Code
import pandas as pd
import numpy as np
print("Hello Capstone Project Course")
###Output
_____no_output_____ |
notebooks/mynotebook(1).ipynb | ###Markdown
Introduction To Python course UM FHML: My app. See [this gallery](https://github.com/voila-dashboards/voila/tree/main/notebooks) for examples of notebooks turned into applications with voila. Here's another very basic example.
###Code
import altair as alt
import pandas as pd
titanic = pd.read_csv('https://raw.githubusercontent.com/NHameleers/dtz2025-datasets/master/titanic.csv')
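# Grouped bar chart: mean survival rate by sex, one panel (column) per passenger class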
alt.Chart(titanic).mark_bar().encode(
y='mean(Survived)',
x='Sex',
column='Pclass:O',
color='Sex'
).properties(
width=150,
height=350
).properties(title='Not the Titanic again?!')
###Output
_____no_output_____ |
notebooks/02 - Fourierove vrste.ipynb | ###Markdown
(sec:fourierove_vrste)= Fourier series The goal of Fourier series is to describe arbitrary (periodic) data as a sum of harmonic functions with different fundamental periods. At this point the goal is stated rather loosely; all the details are worked out in this chapter. The path leads through the following steps:1. writing a periodic function as a sum of harmonic functions 1. identifying the harmonic functions from step 1 1. generalization This chapter is based on a number of sources and on the author's own research. Among books, the reader is referred to {cite:t}`shin2008`; among video resources, to [Grant Sanderson: But what is a Fourier series? From heat flow to drawing with circles](https://www.youtube.com/watch?v=r6sGWTCMz2k). Writing a periodic function as a sum of harmonic functions Before going into the details of Fourier series, let us look at the definition of a general harmonic function:$$x(t) = A\,\sin(\omega\,t+\varphi),$$where $A$ is the amplitude, $\omega$ the angular frequency (unit: rad/s) and $\varphi$ the phase angle (unit: rad) with respect to the time origin. Here $\omega=2\,\pi\,f$, where $f$ is the frequency (unit: 1/s = Hz). A positive phase shifts the harmonic function to the left, a negative phase to the right. The period $T$ after which the harmonic function repeats is defined by the angular frequency: $T=2\,\pi/\omega$. An example of a harmonic function with amplitude $A=1$, angular frequency $\omega=1\,$rad/s and positive phase $\varphi=1\,$rad is shown below.
###Code
import numpy as np
import matplotlib.pyplot as plt
T = 6
A = 1
t, dt = np.linspace(-0.5*T,1.5*T,201, retstep=True)
ω = 1
ϕ = 1
fig, ax1 = plt.subplots()
plt.title(f'Harmonic function: $x(t) = A\,\sin(\\omega\,t+\\varphi); \\omega={ω}, \\varphi={ϕ}$ ')
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[]', color='C0')
ax1.tick_params(axis='y', labelcolor='C0')
ax1.vlines(0, -1.3, 1.3, 'k', lw=0.5)
ax1.hlines(0, np.min(t), np.max(t), 'k', lw=0.5)
ax1.plot(t, A*np.sin(ω*t+ϕ), label='Harmonic data', c='C1', lw=2, alpha=0.8)
ax1.vlines(-ϕ+2*np.pi*np.arange(T/(ω*2*np.pi)+1), -1.3, 1.3, ls='-.', lw=1)
ax1.annotate('$\\varphi$',
xy=(-ϕ, 1.1), xycoords='data',
va='center', size='large',
xytext=(0.1, 1.1), textcoords='data',
arrowprops=dict(arrowstyle='->',
connectionstyle='arc3,rad=0',
color="k", shrinkA=0, shrinkB=0),
)
ax1.hlines(A, 1., 3, ls='-.', lw=1)
ax1.annotate('$A$',
xy=(2.5, 0), xycoords='data',
ha='center', size='large',
xytext=(2.5, A+.05), textcoords='data',
arrowprops=dict(arrowstyle='<-',
connectionstyle='arc3,rad=0',
color="k", shrinkA=0, shrinkB=0),
)
ax1.annotate('$T=\\frac{2\,\pi}{\\omega}$',
xy=(-ϕ, -1.1), xycoords='data',
va='center', size='large',
xytext=(2*np.pi-ϕ+0.1, -1.1), textcoords='data',
arrowprops=dict(arrowstyle='<->',
connectionstyle='arc3,rad=0',
color="k", shrinkA=0, shrinkB=0),
)
plt.show()
###Output
_____no_output_____
###Markdown
Having examined a single harmonic function in detail, we continue with a sum of harmonic functions. Here we restrict ourselves to complex periodic data (see {ref}`sec:klasifikacija_pod`), where the ratio between the frequencies of the individual harmonics is a rational number. We restrict the frequencies of the harmonic functions further: if the fundamental angular frequency $\omega_p=2\,\pi/T_p$ is defined by the fundamental period $T_p$, then every other harmonic is required to have an angular frequency that is an integer multiple of the fundamental angular frequency. This description corresponds to the sum of harmonic functions:$$x(t)=\frac{a_0}{2}+\sum_{n=1}^{\infty}\left[a_n\cos\left(\frac{2\,\pi\,n\,t}{T_p} \right)+b_n\sin\left(\frac{2\,\pi\,n\,t}{T_p} \right)\right],$$where $a_0$ is a constant, $a_n$ is the amplitude of the $n$-th cosine component and $b_n$ the amplitude of the $n$-th sine component. Below we look at an example of a function that satisfies the above restrictions.
###Code
import numpy as np
import matplotlib.pyplot as plt
Tp = 2
t, dt = np.linspace(-0.5*Tp,2.5*Tp,301, retstep=True)
a0 = 1.
N = 10
seed = 0
rg = np.random.default_rng(seed)
a = rg.normal(size=N)*1/np.arange(1,N+1)**2  # scaled so that higher harmonic components have smaller amplitudes
b = rg.normal(size=N)*1/np.arange(1,N+1)**2
x = a0/2
for n in range(N):
x += a[n]*np.cos(2*np.pi*(n+1)*t/Tp) + b[n]*np.sin(2*np.pi*(n+1)*t/Tp)
fig, ax1 = plt.subplots()
plt.title('Complex periodic function')
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[-]', color='C1')
ax1.tick_params(axis='y', labelcolor='C1')
ax1.vlines(0, -2, 2, 'k', lw=0.5)
ax1.hlines(0, np.min(t), np.max(t), 'k', lw=0.5)
ax1.plot(t, x, label='Periodic data', c='C1', lw=2, alpha=0.8)
ax1.vlines([Tp], -2, 2, ls='-.', lw=1)
ax1.hlines(a0/2, np.min(t), np.max(t), ls='-.', lw=1)
ax1.annotate('$\\frac{a_0}{2}$',
xy=(2.5, 0), xycoords='data',
ha='center', size='large',
xytext=(2.5, a0/2+.15), textcoords='data',
arrowprops=dict(arrowstyle='<-',
connectionstyle='arc3,rad=0',
color="k", shrinkA=0, shrinkB=0),
)
ax1.annotate('$T_{p}$',
xy=(0, -0.5), xycoords='data',
va='center', size='large',
xytext=(Tp+0.05, -0.5), textcoords='data',
arrowprops=dict(arrowstyle='<->',
connectionstyle='arc3,rad=0',
color="k", shrinkA=0, shrinkB=0),
)
plt.ylim(-1,2)
plt.show()
###Output
_____no_output_____
###Markdown
The periodic function examined above is of key importance for understanding Fourier series and, later, the Fourier transform (see {ref}`sec:fourierova_int_transf`). In what follows we first rewrite the periodic function in complex form, since this greatly simplifies the mathematical notation and clarifies the physics. The complex form starts from Euler's formula:$$e^{\mathrm{i}\,\alpha} = \cos\alpha + \mathrm{i}\,\sin\alpha,$$where $\mathrm{i}=\sqrt{-1}$. Using $\alpha=2\,\pi\,n\,t/T_p$, we can write:$$\cos\alpha=\frac{1}{2}e^{\mathrm{i}\,\alpha}+\frac{1}{2}e^{-\mathrm{i}\,\alpha}\qquad\sin\alpha=-\frac{1}{2}\,\mathrm{i}\,e^{\mathrm{i}\,\alpha}+\frac{1}{2}\,\mathrm{i}\,e^{-\mathrm{i}\,\alpha}$$ and transform the function:$$x(t)=\frac{a_0}{2}+\sum_{n=1}^{\infty}\left[a_n\cos\big(\frac{2\,\pi\,n\,t}{T_p} \big)+b_n\sin\big(\frac{2\,\pi\,n\,t}{T_p} \big)\right],$$ into:$$x(t)=\frac{a_0}{2}+\sum_{n=1}^{\infty}\left[a_n\big( \frac{1}{2}e^{\mathrm{i}\,\alpha}+\frac{1}{2}e^{-\mathrm{i}\,\alpha}\big)+b_n\big( -\frac{1}{2}\,\mathrm{i}\,e^{\mathrm{i}\,\alpha}+\frac{1}{2}\,\mathrm{i}\,e^{-\mathrm{i}\,\alpha}\big)\right].$$ Rearranging further:$$x(t)=\frac{a_0}{2}+\sum_{n=1}^{\infty}\left[\big( \frac{1}{2}a_n-\frac{1}{2}\mathrm{i}b_n\big)\,e^{\mathrm{i}\,2\,\pi\,n\,t/T_p}\right]+\sum_{n=1}^{\infty}\left[\big( \frac{1}{2}a_n+\frac{1}{2}\mathrm{i}b_n\big)\,e^{-\mathrm{i}\,2\,\pi\,n\,t/T_p}\right].$$ The above expression simplifies to:$$x(t)=\sum_{n=-\infty}^{\infty}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p},$$ where:$$c_n = \begin{cases} \frac{1}{2}a_n-\frac{1}{2}\textrm{i}b_n,& \text{if } n\geq +1\\ \frac{1}{2}a_n, & \text{if } n = 0\\ \frac{1}{2}a_{|n|}+\frac{1}{2}\textrm{i}b_{|n|},& \text{if } n\leq -1\\\end{cases}$$ We find that $c_n=c_{-n}^{*}$, where ${}^*$ denotes the complex conjugate. :::{note}A complex periodic function written with real coefficients:$$x(t)=\frac{a_0}{2}+\sum_{n=1}^{\infty}\left[a_n\cos\left(\frac{2\,\pi\,n\,t}{T_p} \right)+b_n\sin\left(\frac{2\,\pi\,n\,t}{T_p} \right)\right]$$::: :::{note}A complex periodic function written with complex coefficients:$$x(t)=\sum_{n=-\infty}^{\infty}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p}$$::: Both forms of course lead to the same result (see the example below).
###Code
import numpy as np
import matplotlib.pyplot as plt
Tp = 2
t, dt = np.linspace(-0.5*Tp,2.5*Tp,301, retstep=True)
a0 = 1.
N = 10
seed = 0
rg = np.random.default_rng(seed)
a = rg.normal(size=N)*1/np.arange(1,N+1)**2  # scaled so that higher harmonic components have smaller amplitudes
b = rg.normal(size=N)*1/np.arange(1,N+1)**2
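# assemble the complex Fourier coefficients: index N holds c_0, the upper half holds c_n (n>=1),
# and the lower half holds the conjugates c_{-n} = c_n*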
c = np.zeros(2*N+1, dtype='complex')
c[N+1:] = 0.5*a-0.5j*b
c[N] = a0/2
c[:N] = np.conj(c[N+1:])[::-1]
x1 = a0/2
for n in range(N):
x1 += a[n]*np.cos(2*np.pi*(n+1)*t/Tp) + b[n]*np.sin(2*np.pi*(n+1)*t/Tp)
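# reconstruct the same signal from the exponential (complex) form, for comparison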
x = np.zeros(len(t), 'complex')
for n in range(-N,N+1):
x += c[N+n]*np.exp(2j*np.pi*n*t/Tp)
x = np.real(x)  # theoretically we expect a purely real result (the imaginary part must be at the level of numerical round-off)
fig, ax1 = plt.subplots()
plt.title('Complex periodic function')
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[-]', color='C1')
ax1.tick_params(axis='y', labelcolor='C1')
ax1.vlines(0, -2, 2, 'k', lw=0.5)
ax1.hlines(0, np.min(t), np.max(t), 'k', lw=0.5)
ax1.plot(t, x1, label='Classical definition', c='C1', lw=2, alpha=0.8)
ax1.plot(t, x, label='Definition in complex form', c='C0', ls=':', lw=2, alpha=1)
ax1.vlines([Tp], -2, 2, ls='-.', lw=1)
ax1.hlines(a0/2, np.min(t), np.max(t), ls='-.', lw=1)
ax1.annotate('$\\frac{a_0}{2}$',
xy=(2.5, 0), xycoords='data',
ha='center', size='large',
xytext=(2.5, a0/2+.13), textcoords='data',
arrowprops=dict(arrowstyle='<-',
connectionstyle='arc3,rad=0',
color="k", shrinkA=0, shrinkB=0),
)
ax1.annotate('$T_{p}$',
xy=(0, -0.5), xycoords='data',
va='center', size='large',
xytext=(Tp+0.05, -0.5), textcoords='data',
arrowprops=dict(arrowstyle='<->',
connectionstyle='arc3,rad=0',
color="k", shrinkA=0, shrinkB=0),
)
ax1.legend(loc=1)
plt.ylim(-1,2)
plt.show()
###Output
_____no_output_____
###Markdown
Identifying the harmonic functions that make up a periodic function We can now return to the original task of Fourier series: *describing an arbitrary periodic function as a superposition of different harmonic functions*. The question is therefore: in the periodic function$$x(t)=\sum_{n=-\infty}^{\infty}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p},\qquad c_n=c_{-n}^*$$can we identify the harmonic function whose frequency is $m$ times the fundamental frequency, i.e. $c_m\,e^{\mathrm{i}\,2\pi\,m\,t/T_p}$? It turns out that this is relatively simple if we evaluate the integral of the periodic function $x(t)$ multiplied by the unit harmonic function $e^{-\mathrm{i}\,2\pi\,m\,t/T_p}$, which has the negative frequency $-m/T_p$ (instead of $\textrm{i}$ the exponent contains $-\textrm{i}$):$$\underbrace{\int_0^{T_p} x(t)\,e^{-\mathrm{i}\,2\pi\,m\,t/T_p}\,\mathrm{d}t}_{A_m}=\underbrace{\int_0^{T_p} \sum_{n=-\infty}^{\infty}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p} \,\,e^{-\mathrm{i}\,2\pi\,m\,t/T_p}\,\mathrm{d}t}_{B_m}$$ Since $x(t)$ represents a known periodic function (i.e. the data), the integral $A_m$ can be computed. $B_m$ can be simplified to:$$B_m=\int_0^{T_p} \sum_{n=-\infty}^{\infty}c_n\,e^{\mathrm{i}\,2\pi\,(n-m)\,t/T_p} \,\mathrm{d}t$$ Exchanging the order of summation and integration and factoring out the constant $c_n$:$$B_m= \sum_{n=-\infty}^{\infty}c_n\,\int_0^{T_p}e^{\mathrm{i}\,2\pi\,(n-m)\,t/T_p} \,\mathrm{d}t$$ Because we integrate a harmonic function whose frequency is an integer multiple of the fundamental frequency $1/T_p$, the result is always 0, except when $n=m$:$$\int_0^{T_p}e^{\mathrm{i}\,2\pi\,(n-m)\,t/T_p} \,\mathrm{d}t=\begin{cases} 0,& \text{if } n\ne m\\ T_p,& \text{if } n= m\\\end{cases}$$It follows that:$$B_m = c_m\,T_p$$ Since $A_m=B_m$, the unknown Fourier coefficient $c_m$ is given by (a complex number with an amplitude and a phase):$$c_m = \frac{A_m}{T_p} = \frac{1}{T_p}\,\int_0^{T_p} x(t)\,e^{-\mathrm{i}\,2\pi\,m\,t/T_p}\,\mathrm{d}t$$ We have thus shown that an individual harmonic component is relatively easy to determine; the difficulty in practice is that the given data $x(t)$ do not necessarily satisfy all the assumptions made above (e.g. that each harmonic is an integer multiple of the fundamental frequency). Nevertheless, Fourier series are widely used and give excellent results.
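The orthogonality relation above is what makes the whole identification work, so it is worth a quick numerical check. The following is a minimal sketch (an addition, not part of the original text): it evaluates the integral with `np.trapz` for a few pairs $(n, m)$ and, up to round-off, returns $T_p$ for $n=m$ and 0 otherwise.
```python
import numpy as np

Tp = 2.0
t = np.linspace(0, Tp, 1001)  # exactly one period, both endpoints included

for n in range(3):
    for m in range(3):
        I = np.trapz(np.exp(2j*np.pi*(n - m)*t/Tp), t)
        print(f'n={n}, m={m}: {I:.3f}')  # ~Tp when n == m, ~0 otherwise
```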
Generalization Fourier series are most often found defined in three forms ($T_p$ denotes the fundamental period and $N$ the number of harmonic components that are taken into account/identified): (sec:FV_exp)= Fourier series in exponential (complex) form :::{note}Fourier coefficients:$$c_n = \frac{1}{T_p}\,\int_0^{T_p} x(t)\,e^{-\mathrm{i}\,2\pi\,n\,t/T_p}\,\mathrm{d}t$$Reconstructed continuous function:$$x(t)=\sum_{n=-N}^{N}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p},\qquad c_n=c_{-n}^*$$::: (sec:FV_sin_cos)= Fourier series in sine-cosine form :::{note}Fourier coefficients:$$a_n = \frac{2}{T_p}\,\int_0^{T_p} x(t)\,\cos(2\pi\,n\,t/T_p)\,\mathrm{d}t$$$$b_n = \frac{2}{T_p}\,\int_0^{T_p} x(t)\,\sin(2\pi\,n\,t/T_p)\,\mathrm{d}t$$Reconstructed continuous function:$$x(t)=\frac{a_0}{2}+\sum_{n=1}^{N}a_n\,\cos(2\pi\,n\,t/T_p)+b_n\,\sin(2\pi\,n\,t/T_p)$$::: Fourier series in amplitude-phase form :::{note}Reconstructed continuous function:$$x(t)=\frac{A_0}{2}+\sum_{n=1}^{N}A_n\,\cos(2\pi\,n\,t/T_p-\varphi_n)$$:::Here the amplitude $A_n$ and the phase $\varphi_n$ must be determined via the exponential or the sine-cosine form, using the relations between the individual forms:$$\begin{split}c_0&=\frac{A_0}{2}&=\frac{a_0}{2}\\c_n&=\frac{A_n}{2}\,e^{-\mathrm{i}\varphi_n}&=\frac{1}{2}(a_n-\mathrm{i}b_n)\\c_n&=c_{-n}^*&\\\end{split}$$In this book we will use the exponential form almost exclusively, since it is the most compact and the simplest to use in mathematical derivations.
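Because these conversions are easy to mix up in practice, here is a minimal sketch (an addition, not part of the original text; the helper name `exp_to_other_forms` is ours) that converts a complex coefficient $c_n$ (for $n\geq 1$) into the corresponding sine-cosine and amplitude-phase coefficients using the relations above; for $n=0$ they reduce to $c_0=a_0/2=A_0/2$.
```python
import numpy as np

def exp_to_other_forms(cn):
    """Convert c_n (n >= 1) into (a_n, b_n) and (A_n, phi_n)."""
    an = 2*np.real(cn)     # from c_n = (a_n - i b_n)/2
    bn = -2*np.imag(cn)
    An = 2*np.abs(cn)      # from c_n = (A_n/2) e^{-i phi_n}
    phin = -np.angle(cn)
    return an, bn, An, phin

print(exp_to_other_forms(0.3 - 0.4j))  # (0.6, 0.8, 1.0, 0.927...)
```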
The relations are given in the table below.

| if $x(t)$ is | if $y(t)$ is | then $x(t)\,y(t)$ is |
|:-|-:|:-:|
| odd | odd | *even* |
| odd | even | *odd* |
| even | odd | *odd* |
| even | even | *even* |

From the table above we can conclude (see also {ref}`sec:FV_sin_cos`):

:::{note}
* if we seek the Fourier series of an *even* periodic function $x(t)$, the sine terms $b_n$ will be zero (the product of an even and an odd function is an odd function, and the integral of an odd function is 0);
* if we seek the Fourier series of an *odd* periodic function $x(t)$, the cosine terms $a_n$ will be zero (the product of an odd and an even function is an odd function, and the integral of an odd function is 0).
:::

Some examples

A complex periodic function

We first look at the complex periodic function that was introduced and defined above. The function is defined by $N=10$ harmonic components $c_n$, which in the example below are known; here we show that applying the equations defined above leads to their exact identification.
###Code
import numpy as np
import matplotlib.pyplot as plt
Tp = 2
t, dt = np.linspace(-0.5*Tp,2.5*Tp,301, retstep=True)
a0 = 1.
N = 10
seed = 0
rg = np.random.default_rng(seed)
a = rg.normal(size=N)*1/np.arange(1,N+1)**2 # scaling so that higher components have smaller amplitudes
b = rg.normal(size=N)*1/np.arange(1,N+1)**2
c = np.zeros(2*N+1, dtype='complex')
c[N+1:] = 0.5*a-0.5j*b
c[N] = a0/2
c[:N] = np.conj(c[N+1:])[::-1]
x = np.zeros(len(t), 'complex')
for n in range(-N,N+1):
x += c[N+n]*np.exp(2j*np.pi*n*t/Tp)
x = np.real(x) # theoretically we expect a purely real result (the imaginary part must be at the level of numerical error)
fig, ax1 = plt.subplots()
plt.title('Complex periodic function')
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[-]')
ax1.tick_params(axis='y')
ax1.vlines(0, -2, 2, 'k', lw=0.5)
ax1.hlines(0, np.min(t), np.max(t), 'k', lw=0.5)
ax1.plot(t, x, label='Definition in complex form', c='C0', lw=2, alpha=1)
ax1.vlines([0, Tp], -2, 2, ls='-.', lw=1)
ax1.hlines(a0/2, np.min(t), np.max(t), ls='-.', lw=1)
ax1.legend(loc=1)
plt.ylim(-1,2)
plt.show()
###Output
_____no_output_____
###Markdown
In the example above, the periodic function is generated from the following vector of Fourier coefficients:
###Code
c
###Output
_____no_output_____
###Markdown
Notice that the vector is complex-conjugate symmetric about its central element. An arbitrary element can be determined from the time series $x(t)$ alone by following the definition:$$c_n = \frac{1}{T_p}\,\int_0^{T_p} x(t)\,e^{-\mathrm{i}\,2\pi\,n\,t/T_p}\,\mathrm{d}t$$which in a numerical implementation (for `n=1`) reads:
###Code
n=1 # poskusite še druge vrednosti, tudi n>N!
sel = np.logical_and(t>=0, t<=Tp)
np.trapz(x[sel]*np.exp(-2j*np.pi*n*t[sel]/Tp), dx=dt)/Tp
###Output
_____no_output_____
###Markdown
Here we were careful to select (`sel`) exactly one period of the time series. It must be emphasized that the integration has to run over the whole period. The result is then **exact** down to the numerical precision of the computer. It is worth pointing out that we have to be very precise when implementing the derived expressions. It is easy to obtain a *very good* result; but as long as the result is not *exact* down to the numerical precision of the computer, there is an error somewhere in the implementation. One such error would be a discretization that does not include both the point at 0 s and the point at $T_p$. You can try this error out by going a few cells up to the figure definition and changing the line:
```python
t, dt = np.linspace(-0.5*Tp,2.5*Tp,301, retstep=True)
```
to (301 → 300):
```python
t, dt = np.linspace(-0.5*Tp,2.5*Tp,300, retstep=True)
```

Derivative / integral of a complex periodic function

We continue with the example above and first check that the time derivative is indeed equal to the Fourier series in which each term has simply been differentiated.
###Code
import numpy as np
import matplotlib.pyplot as plt
Tp = 2
t, dt = np.linspace(-0.5*Tp,2.5*Tp,301, retstep=True)
a0 = 1.
N = 10
seed = 0
rg = np.random.default_rng(seed)
a = rg.normal(size=N)*1/np.arange(1,N+1)**2 # scaling so that higher components have smaller amplitudes
b = rg.normal(size=N)*1/np.arange(1,N+1)**2
c = np.zeros(2*N+1, dtype='complex')
c[N+1:] = 0.5*a-0.5j*b
c[N] = a0/2
c[:N] = np.conj(c[N+1:])[::-1]
x = np.zeros(len(t), 'complex')
for n in range(-N,N+1):
x += c[N+n]*np.exp(2j*np.pi*n*t/Tp)
x = np.real(x) # theoretically we expect a purely real result (the imaginary part must be at the level of numerical error)
x_d_t = np.gradient(x,dt)
x_d_v = np.zeros(len(t), 'complex') # DERIVATIVE VIA THE SERIES
for n in range(-N,N+1):
x_d_v += c[N+n]*np.exp(2j*np.pi*n*t/Tp)*2j*np.pi*n/Tp
x_d_v = np.real(x_d_v) # theoretically we expect a purely real result (the imaginary part must be at the level of numerical error)
fig, ax1 = plt.subplots()
ax1.set_xlabel('$t\\quad $[s]')
ax1.vlines([0, Tp], -4, 4, 'k', lw=0.5)
ax1.plot(t, x_d_t, label='$\dot{x}(t)$ - computed in the time domain', c='C0', lw=2, alpha=1)
ax1.plot(t, x_d_v, label='$\dot{x}(t)$ - series', ls=':', c='C1', lw=2, alpha=1)
ax1.legend()
plt.show()
###Output
_____no_output_____
###Markdown
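The two curves should coincide; a quick check (a sketch, using `x_d_t` and `x_d_v` from the cell above):
```python
# maximum deviation between the two derivative estimates; it is dominated by
# the finite-difference error of np.gradient, not by the Fourier series
print(np.max(np.abs(x_d_t - x_d_v)))
```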
We can take the example above one step further and identify the original displacement $x(t)$ from the Fourier series of the velocity $\dot x(t)$; see the code and figure below. When integrating, we run into a problem with the Fourier coefficient at frequency 0 (the static component); in the code below we simply skip this frequency, since it cannot be determined. We would face a similar problem in the time domain: when evaluating an indefinite integral, we need an initial value. The figure below therefore shows a discrepancy between the two approaches equal to the size of the static component.
###Code
# x_d_t is the velocity; a_0 is the static component carried over from the example above
N = 10 # identification of the Fourier coefficients of the velocity
c_d = np.zeros(2*N+1, dtype='complex')
n = np.arange(-N,N+1)
sel = np.logical_and(t>=0, t<=Tp)
for i in n:
c_d[i+N] = np.trapz(x_d_t[sel]*np.exp(-2j*np.pi*i*t[sel]/Tp), dx=dt)/Tp
x_v = np.zeros(len(t), 'complex') # INTEGRAL VIA THE SERIES
for n in range(-N,N+1):
if n!=0:
x_v += c_d[N+n]*np.exp(2j*np.pi*n*t/Tp)/(2j*np.pi*n/Tp)
x_v = np.real(x_v) # theoretically we expect a purely real result (the imaginary part must be at the level of numerical error)
fig, ax1 = plt.subplots()
ax1.set_xlabel('$t\\quad $[s]')
ax1.plot(t, x, label='$x(t)$ - definition', c='C0', lw=2, alpha=1)
ax1.plot(t, x_v, label='$x(t)$ - series from the FS coefficients of the velocity $\dot{c}$', ls=':', c='C1', lw=2, alpha=1)
ax1.hlines(a0/2, np.min(t), np.max(t), ls='-.', lw=1)
ax1.hlines(0, np.min(t), np.max(t), ls='-.', color='C1', lw=1)
ax1.legend()
plt.show()
###Output
_____no_output_____
###Markdown
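If the mean value is known, the static component lost in the previous cell can simply be added back — a minimal sketch (names from the cells above; `a0/2` is the known mean):
```python
# hypothetical fix-up: reinstate the known static component by hand
x_v_fixed = x_v + a0/2  # now overlays the original x(t)
```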
A periodic function

Here we look at a periodic function that is not complex harmonic. An example of such a function is the rising sawtooth shown in the figure below.
###Code
import numpy as np
import matplotlib.pyplot as plt
Tp = 2
t, dt = np.linspace(-0.5*Tp,2.5*Tp,301, retstep=True)
x = t%Tp
fig, ax1 = plt.subplots()
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[-]', color='C1')
ax1.tick_params(axis='y', labelcolor='C1')
ax1.vlines(0, -0.5, 2.5, 'k', lw=0.5)
ax1.hlines(0, np.min(t), np.max(t), 'k', lw=0.5)
ax1.plot(t, x, label='Periodic data', c='C0', lw=2, alpha=1)
ax1.legend(loc=1)
plt.ylim(-0.5,2.5)
plt.show()
###Output
_____no_output_____
###Markdown
Even though the function is not complex periodic, while the theory of Fourier series was derived under that assumption, let us try to identify its harmonic components anyway. We first do this on the discrete time series. We focus on the part of the data that repeats:
###Code
import numpy as np
import matplotlib.pyplot as plt
Tp = 2
t, dt = np.linspace(0,Tp,101, retstep=True)
x = t
fig, ax1 = plt.subplots()
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[-]', color='C1')
ax1.tick_params(axis='y', labelcolor='C1')
ax1.vlines(0, -0.5, 2.5, 'k', lw=0.5)
ax1.hlines(0, np.min(t), np.max(t), 'k', lw=0.5)
ax1.plot(t, x, label='Periodic data', c='C0', lw=2, alpha=1)
ax1.legend(loc=1)
plt.ylim(-0.5,2.5)
plt.show()
###Output
_____no_output_____
###Markdown
And we identify the Fourier coefficients:
###Code
N = 10
c = np.zeros(2*N+1, dtype='complex')
n = np.arange(-N,N+1)
for i in n:
c[i+N] = np.trapz(x*np.exp(-2j*np.pi*i*t/Tp), dx=dt)/Tp
c
###Output
_____no_output_____
###Markdown
Based on the identified Fourier coefficients, we can define the complex periodic function:$$x(t)=\sum_{n=-N}^{N}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p},\qquad c_n=c_{-n}^*.$$The concrete numerical code is shown below (we use a new numerical array `x_r` to distinguish it from the data in `x`):
###Code
x_r = np.zeros(len(t), 'complex')
for n in range(-N,N+1):
x_r += c[N+n]*np.exp(2j*np.pi*n*t/Tp)
x_r = np.real(x_r) # theoretically we expect a purely real result (the imaginary part must be at the level of numerical error)
fig, ax1 = plt.subplots()
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[-]')
ax1.vlines(0, -0.5, 2.5, 'k', lw=0.5)
ax1.hlines(0, np.min(t), np.max(t), 'k', lw=0.5)
ax1.plot(t, x, label='Original periodic data', c='C0', lw=2, alpha=1)
ax1.plot(t, x_r, label='Complex periodic function', c='C1', lw=2, alpha=.6)
ax1.legend(loc=1)
plt.ylim(-0.5,2.5)
plt.show()
###Output
_____no_output_____
###Markdown
We observe that the identified complex harmonic function describes the original periodic data relatively well. If we increased the number of harmonic components ($N$), the identified function would fit the original one better, but a deviation would still remain at the point of discontinuity. This deviation at the discontinuity is known as the **Gibbs** phenomenon. Truth be told, most engineering applications are continuous, so the Gibbs phenomenon usually has little practical importance.

Analytic treatment of a periodic function

Above we computed the Fourier series on discrete data and were therefore limited in how many harmonic components we can compute (`N<len(t)/2`; why $N$ is tied to the length of the time series will become clear later). Here we look at the treatment of continuous systems and determine the Fourier series with the help of symbolic derivation. As an example of a periodic function we choose a rising sawtooth proportional to the time `t`, with period `T_p`:
###Code
import sympy as sym
sym.init_printing()
t, T_p = sym.symbols('t, T_p', real=True)
x_r = sym.fourier_series(t, limits=(t, 0, T_p))
###Output
_____no_output_____
###Markdown
The series composed of the first 5 terms is therefore:
###Code
x_r.truncate(5)
###Output
_____no_output_____
###Markdown
We can also compute an individual term ourselves:
###Code
n = 1
c_n = sym.integrate(t*sym.exp(-sym.I*2*n*sym.pi*t/T_p), (t, 0, T_p)).simplify()/T_p
c_n
###Output
_____no_output_____
###Markdown
To obtain the harmonic component, we must multiply `c_n` by the corresponding positive- and negative-frequency exponentials:
###Code
sym.simplify(c_n*sym.exp(sym.I*2*sym.pi*n*t/T_p)+sym.conjugate(c_n)*sym.exp(-sym.I*2*sym.pi*n*t/T_p))
###Output
_____no_output_____
###Markdown
And the figure for `T_p=2` and a different number of terms in the series:
###Code
import sympy as sym
sym.init_printing()
t, T_p = sym.symbols('t, T_p')
x_r = sym.fourier_series(t, limits=(t, 0, T_p))
p0 = sym.plot(t, (t, 0 ,2), label='x(t)=t', show=False, line_color='k', ylabel='x(t)', legend=True)
p1 = sym.plot(x_r.truncate(5).subs(T_p,2), (t, 0 ,2), line_color='C0', label='5 terms', show=False)
p2 = sym.plot(x_r.truncate(20).subs(T_p,2), (t, 0 ,2), line_color='C1', label='20 terms', show=False)
p0.append(p1[0])
p0.append(p2[0])
p0.show()
###Output
_____no_output_____
###Markdown
(sec:dirac_cas)= The Dirac delta function

:::{note}The Dirac delta function is defined by:$$\delta(t)=0 \quad\textrm{ for } t\ne 0 \qquad\textrm{ and } \qquad \int_{-\infty}^{+\infty}\delta(t)\,\textrm{d}t=1.$$:::Remark: strictly speaking this is not an ordinary function but a so-called generalized function. Since the integral of the Dirac delta function is 1, it is sometimes also called the unit impulse function, defined also as:$$\delta_{\varepsilon}=\begin{cases}\frac{1}{\varepsilon}&\textrm{ for }\quad -\frac{\varepsilon}{2}\le t\le+\frac{\varepsilon}{2}\\0& \textrm{otherwise}.\end{cases}$$ When defining the unit impulse, we can also make use of the **step function** (**Heaviside function**), which is defined as:$$u(t)=\begin{cases}1&\textrm{ for }\quad t>0\\0& \textrm{otherwise}.\end{cases}$$Two step functions are defined below:
###Code
import sympy as sym
t, ε = sym.symbols(r't, \varepsilon')
unit_step = sym.Heaviside(t+ε/2)
p1 = sym.plot(unit_step.subs(ε, +0.5), (t,-1,1), line_color='C0', line_style=':',
              show=False, label='Step function $u(t+\\varepsilon/2)$')
p2 = sym.plot(-unit_step.subs(ε, -0.5), (t,-1,1), line_color='C1',
              show=False, label='Step function $-u(t-\\varepsilon/2)$')
p1.extend(p2)
p1.legend = True
p1.show()
###Output
_____no_output_____
###Markdown
We arrive at the unit impulse through a weighted sum of two unit steps:$$\delta_{\varepsilon}=\frac{1}{\varepsilon}\Big(u\big(t+\frac{\varepsilon}{2}\big)-u\big(t-\frac{\varepsilon}{2}\big)\Big)$$ Let us see how the step function is converted into a unit impulse:
###Code
import sympy as sym
t, ε = sym.symbols(r't, \varepsilon')
podatki = {ε: 0.5}
unit_step = sym.Heaviside(t+ε/2) # the normalization (H0) must be set correctly here, see the help!
δ_ε = 1/ε * (unit_step-unit_step.subs(ε, -ε))
δ_ε
p1 = sym.plot(δ_ε.subs(podatki), (t,-.5,.5), line_color='C0', line_style=':',
ylabel='$\\delta_{\\varepsilon}(t)$')
###Output
_____no_output_____
###Markdown
and then integrate (as expected, the result equals 1; try changing $\varepsilon$ in `podatki` above!):
###Code
sym.integrate(δ_ε.subs(podatki), (t, -sym.oo, sym.oo))
###Output
_____no_output_____
###Markdown
The Dirac delta function and the unit step are also related as follows ($u(t)$ is the step function):$$\delta(t)=\lim_{\varepsilon\rightarrow 0}\delta_{\varepsilon}(t)=\frac{\textrm{d}}{\textrm{d}t}u(t).$$

(sec:dirac_lastnosti)= Properties of the Dirac delta function

:::{note}
1. it is even: $\delta(t)=\delta(-t)$
1. the *sifting property*: the integral of an ordinary function $x(t)$ with a shifted Dirac function $\delta(t-a)$ returns the value of the function at $a$: $\int_{-\infty}^{+\infty}x(t)\,\delta(t-a)\,\textrm{d}t= x(a)$.
1. $\int_{-\infty}^{+\infty}e^{\pm\textrm{i}\,2\,\pi\,a\,t}\,\textrm{d}t= \delta(a)$ or also: $\int_{-\infty}^{+\infty}e^{\pm\textrm{i}\,a\,t}\,\textrm{d}t= 2\,\pi\,\delta(a)$; for a proof see {cite:t}`bendat_2011,shin2008` (p. 40).
1. $\delta(a\,t)=\delta(t)/|a|$.
1. $\int_{-\infty}^{+\infty}f(t)\,\delta^{(n)}(t-a)\,\textrm{d}t= (-1)^n\,f^{(n)}(a)$, where $n$ denotes the order of the derivative.
:::

A falling sawtooth and its derivative

Consider a periodic falling sawtooth function as shown below:
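Before moving on, the sifting property listed above is easy to check symbolically — a minimal sketch (assumes `sym` and the symbol `t` from the earlier cells; `a` and `f` are introduced here purely for illustration):
```python
a = sym.symbols('a', real=True)
f = sym.Function('f')
sym.integrate(f(t)*sym.DiracDelta(t - a), (t, -sym.oo, sym.oo))  # returns f(a)
```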
###Code
import sympy as sym
sym.init_printing()
t, T_p = sym.symbols('t, T_p')
podatki = {T_p: 2}
x = 1-t/T_p
x_r = sym.fourier_series(x, limits=(t, 0, T_p))
p0 = sym.plot((x).subs(podatki), (t, 0 ,2), label='x(t)=1-t/T_p', show=False, line_color='k', ylabel='x(t)', legend=True)
p1 = sym.plot(x_r.truncate(5).subs(podatki), (t, 0 ,2), line_color='C0', label='5 terms', show=False)
p2 = sym.plot(x_r.truncate(20).subs(podatki), (t, 0 ,2), line_color='C1', label='20 terms', show=False)
p0.append(p1[0])
p0.append(p2[0])
p0.show()
###Output
_____no_output_____
###Markdown
The series of the falling sawtooth function is defined as:$$x(t) = \frac{1}{2}+\frac{1}{\pi}\sum_{n=1}^{\infty}\frac{1}{n}\sin\Big(\frac{2\pi\,n\,t}{T_p}\Big)$$Or, with the help of symbolic derivation:
###Code
import sympy as sym
sym.init_printing()
t, T_p = sym.symbols('t, T_p')
podatki = {T_p: 2}
x = 1-t/T_p
x_r = sym.fourier_series(x, limits=(t, 0, T_p))
x_r.truncate(5)
###Output
_____no_output_____
###Markdown
If we differentiate the falling sawtooth function in time, the result is:$$\dot x(t) = -\frac{1}{T_p} + \sum_{n=-\infty}^{+\infty}\delta(t-n\,T_p)$$but since the series itself can also be differentiated, it likewise holds that:$$\dot x(t) = \frac{2}{T_p}\sum_{n=1}^{\infty}\cos\Big(\frac{2\pi\,n\,t}{T_p}\Big)$$Equating the two expressions leads to::::{note}The periodic *train of impulses* has constant coefficients in its Fourier-series form ($2/T_p$):$$\sum_{n=-\infty}^{+\infty}\delta(t-n\,T_p) = \frac{1}{T_p} +\frac{2}{T_p}\sum_{n=1}^{\infty}\cos\Big(\frac{2\pi\,n\,t}{T_p}\Big)$$:::We will need this result later, when we discuss the sampling of signals.
###Code
import sympy as sym
sym.init_printing()
t, T_p = sym.symbols('t, T_p')
podatki = {T_p: 2}
x = 1-t/T_p
x_r = sym.fourier_series(x, limits=(t, 0, T_p))
p0 = sym.plot(sym.diff(x_r.truncate(20),t).subs(podatki), (t, 0 ,6), line_color='C1', label='20 terms', show=False,
              ylabel='Series: $\\dot x(t)$', legend=True, adaptive=False, nb_of_points=800)
p1 = sym.plot(sym.diff(x_r.truncate(5),t).subs(podatki), (t, 0 ,6), line_color='C0', label='5 terms', show=False)
p0.append(p1[0])
p0.show()
###Output
_____no_output_____
###Markdown
Amplitude and phase spectrum

The Fourier coefficients $c_n$ that we deal with in Fourier series carry the amplitude and phase information of a given harmonic component. Let us look at the example of a periodic square wave: figure below.
###Code
import numpy as np
import matplotlib.pyplot as plt
# signal
T_p = 2
n = 200
x = np.ones(n)
x[:n//4] = -1.
x[n//4:2*n//4] = 1.
x[2*n//4:3*n//4] = -1.
x[3*n//4:n] = 1.
t = np.arange(n)*2*T_p/(n-1) - T_p/2
dt = t[1]-t[0]
# why would this code be wrong?
# t, dt = np.linspace(-0.5*Tp,1.5*Tp,201, retstep=True)
# x = np.sign(np.sin(2*np.pi*t/Tp))
# Fourierove vrste
N = 10
c = np.zeros(2*N+1, dtype='complex')
n = np.arange(-N,N+1)
# we must restrict ourselves to exactly one period, otherwise the coefficients are not computed correctly
sel = np.logical_and(t>=0,t<=T_p)
for i in n:
c[i+N] = np.trapz(x[sel]*np.exp(-2j*np.pi*i*t[sel]/T_p), dx=dt)/T_p
# rekonstrukcija
x_r = np.zeros(len(t), 'complex')
for _ in range(-N,N+1):
x_r += c[N+_]*np.exp(2j*np.pi*_*t/T_p)
x_r = np.real(x_r)
fig, ax1 = plt.subplots()
ax1.set_xlabel('$t\\quad $[s]')
ax1.set_ylabel('$x(t)\quad$[-]')
ax1.tick_params(axis='y')
ax1.plot(t, x, label='Periodic data', c='C0', lw=2, alpha=1)
ax1.plot(t, x_r, label='Reconstruction via the Fourier series', c='C1', lw=2, alpha=1)
ax1.legend(loc=4)
plt.show()
###Output
_____no_output_____
###Markdown
While preparing the data above, we computed the Fourier coefficients $c$:
###Code
c
###Output
_____no_output_____
###Markdown
The Fourier coefficients can be shown graphically. If we plot the magnitude of the complex number, $|c_n|$, we speak of the amplitude spectrum: for each multiple of the fundamental frequency $1/T_p$ we show the amplitude content of the harmonic component. Similarly, we speak of the phase spectrum when we plot $\angle c_n$.
###Code
import numpy as np
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
ax1.set_xlabel('$n\\quad $[-]')
ax1.set_ylabel('$|c_n|\\quad$[-]', color='C0')
ax1.tick_params(axis='y', labelcolor='C0')
ax1.vlines(0, -2, 2.5, 'k', lw=0.5)
ax1.hlines(0, np.min(n), np.max(n), 'k', lw=0.5)
ax1.plot(n, np.abs(c), 'o-', label='Amplitude $c_n$', c='C0', lw=2, alpha=1)
ax1.set_xticks(ticks=n)
ax2 = ax1.twinx()
ax2.plot(n, np.angle(c), 'o-', label='Phase $c_n$', c='C1', lw=2, alpha=1)
ax2.set_ylabel('$\\angle c_n \quad$[-]', color='C1')
ax2.tick_params(axis='y', labelcolor='C1')
ax2.set_ylim(-2.,2.)
plt.show()
###Output
_____no_output_____
###Markdown
(sec:parseval)= Parseval's theorem and the power spectrum

Parseval's theorem

If $x(t)$ is the voltage measured across a resistor of resistance $1\,\Omega$, then the instantaneous power is $P(t)=x^2(t)/R$, and the average power of a periodic signal over one period is:$$\frac{1}{T_p}\int_0^{T_p}x^2(t)\,\textrm{d}t$$Since $x(t)=\sum_{n=-\infty}^{\infty}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p}$ and $x^2(t)=x(t)\,x^*(t)$, the average power is also defined as:$$\frac{1}{T_p}\int_0^{T_p}x^2(t)\,\textrm{d}t=\frac{1}{T_p}\int_0^{T_p}\sum_{n=-\infty}^{\infty}c_n\,e^{\mathrm{i}\,2\pi\,n\,t/T_p}\,\sum_{m=-\infty}^{\infty}c_m^*\,e^{-\mathrm{i}\,2\pi\,m\,t/T_p}\,\textrm{d}t$$which we simplify to:$$\frac{1}{T_p}\int_0^{T_p}x^2(t)\,\textrm{d}t=\frac{1}{T_p}\sum_{n=-\infty}^{\infty}\,\sum_{m=-\infty}^{\infty}c_n\,c_m^*\,\int_0^{T_p}e^{\mathrm{i}\,2\pi\,(n-m)\,t/T_p}\,\textrm{d}t$$ The integral of a harmonic function over a full period is always zero except when its frequency is 0; in that case the integrand equals 1 and the integral equals $T_p$:$$\int_0^{T_p}e^{\mathrm{i}\,2\pi\,(n-m)\,t/T_p}\,\textrm{d}t=\begin{cases}T_p,&\textrm{if}\quad m=n,\\0,&\textrm{otherwise.}\\\end{cases}$$:::{note}This yields **Parseval's theorem**, also called Parseval's identity:$$\frac{1}{T_p}\int_0^{T_p}x^2(t)\,\textrm{d}t=\sum_{n=-\infty}^{\infty}c_n\,c_n^*,$$which relates the average power in the time domain to the power in the frequency domain.::: Let us look at a concrete example:
###Code
T_p = 2
t, dt = np.linspace(0,T_p,101, retstep=True)
a0 = 1.
N = 10
seed = 0
rg = np.random.default_rng(seed)
a = rg.normal(size=N)*1/np.arange(1,N+1)**2 # scaling so that higher components have smaller amplitudes
b = rg.normal(size=N)*1/np.arange(1,N+1)**2
c = np.zeros(2*N+1, dtype='complex')
c[N+1:] = 0.5*a-0.5j*b
c[N] = a0/2
c[:N] = np.conj(c[N+1:])[::-1]
x = np.zeros(len(t), 'complex')
for n in range(-N,N+1):
x += c[N+n]*np.exp(2j*np.pi*n*t/T_p)
x = np.real(x) # theoretically we expect a purely real result (the imaginary part must be at the level of numerical error)
###Output
_____no_output_____
###Markdown
The value from the time domain:
###Code
np.trapz(x**2,dx=dt)/T_p
###Output
_____no_output_____
###Markdown
The value based on the Fourier coefficients:
###Code
np.dot(c,np.conjugate(c))
###Output
_____no_output_____
###Markdown
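Note that `np.dot(c, np.conjugate(c))` above is formally a complex number whose imaginary part is numerical noise; an explicit side-by-side comparison (a sketch; names from the cells above):
```python
lhs = np.trapz(x**2, dx=dt)/T_p            # average power in the time domain
rhs = np.real(np.dot(c, np.conjugate(c)))  # sum of |c_n|^2
print(lhs, rhs, lhs - rhs)                 # the difference is at the level of numerical error
```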
As expected, we obtain the same value (up to numerical error)!

The power spectrum

Parseval's theorem defines the average power as the sum of the squares of the Fourier coefficients, i.e. of the frequency components. The plot of the squares of the individual frequency components is called the power spectrum. It should be emphasized that the power spectrum loses the phase information and is always defined with real values.
###Code
import numpy as np
import matplotlib.pyplot as plt
# signal
T_p = 2
t, dt = np.linspace(-0.5*T_p,1.5*T_p,201, retstep=True)
x = np.sign(np.sin(2*np.pi*t/T_p))
# Fourierove vrste
N = 10
c = np.zeros(2*N+1, dtype='complex')
n = np.arange(-N,N+1)
# we must restrict ourselves to exactly one period, otherwise the coefficients are not computed correctly
sel = np.logical_and(t>=0,t<=T_p)
for i in n:
c[i+N] = np.trapz(x[sel]*np.exp(-2j*np.pi*i*t[sel]/T_p), dx=dt)/T_p
fig, ax1 = plt.subplots()
ax1.set_xlabel('$n\\quad $[-]')
ax1.set_ylabel('$|c_n|^2\\quad$[-]')
ax1.plot(n, np.real(c*np.conjugate(c)), 'o', label='$|c_n|^2$', c='C0', lw=2, alpha=1)
ax1.set_xticks(ticks=n)
plt.show()
###Output
_____no_output_____ |
tutorials/module2-numerics/01_hypersolver_odeint.ipynb | ###Markdown
Basic usage of the `Hypersolver` API

We showcase the `torchdyn` API for [hypersolvers](https://arxiv.org/pdf/2007.09601.pdf). Hypersolvers, or hybrid ODE solvers equipped with a neural network that approximates local residuals, are seamlessly integrated into the `odeint` API. A major design point of the API is that each solver preserves a persistent state, which for standard ODE solvers contains the tableau; for hypersolvers, the state also contains their hypernetwork parameters.
###Code
import time
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from torchdyn.core import NeuralODE
from torchdyn.datasets import *
from torchdyn.numerics import odeint, Euler, HyperEuler
%load_ext autoreload
%autoreload 2
# quick run for automated notebook validation
dry_run = False
class Lorenz(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Linear(1,1)
def forward(self, t, x):
x1, x2, x3 = x[...,:1], x[...,1:2], x[...,2:]
dx1 = 10 * (x2 - x1)
dx2 = x1 * (28 - x3) - x2
dx3 = x1 * x2 - 8/3 * x3
return torch.cat([dx1, dx2, dx3], -1)
x0 = torch.randn(512, 3) + 15 # solve 512 IVPs in parallel!
t_span = torch.linspace(0, 5, 4000)
sys = Lorenz()
if dry_run: t_eval, sol_gt = odeint(sys, x0, t_span, solver='tsit5', atol=1e-3, rtol=1e-3)
else: t_eval, sol_gt = odeint(sys, x0, t_span, solver='tsit5', atol=1e-8, rtol=1e-8)
fig = plt.figure(figsize=(15, 4))
axs = fig.subplots(3,1)
axs[0].plot(sol_gt[:,:4,0], c='black'); axs[1].plot(sol_gt[:,:4,1], c='r'); axs[2].plot(sol_gt[:,:4,2], c='b');
###Output
_____no_output_____
###Markdown
Can we speed up simulation?
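One option is a hypersolver: `HyperEuler` augments an explicit Euler step with a learned correction term. A sketch of the update (notation mine; $g_\theta$ is the hypernetwork defined below):

$$x_{n+1} = x_n + \Delta t\, f(t_n, x_n) + \Delta t^2\, g_\theta(t_n, x_n)$$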
###Code
class VanillaHyperNet(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
for p in self.net.parameters():
torch.nn.init.uniform_(p, 0, 1e-5)
def forward(self, t, x):
return self.net(x)
net = nn.Sequential(nn.Linear(3, 64), nn.Softplus(), nn.Linear(64, 64), nn.Softplus(), nn.Linear(64, 3))
hypersolver = HyperEuler(VanillaHyperNet(net))
t_eval, sol = odeint(sys, x0, t_span, solver=hypersolver, atol=1e-5, rtol=1e-5)
sol = sol.detach()
fig = plt.figure(figsize=(15, 14))
axs = fig.subplots(6,1)
axs[0].plot(sol[:,:4,0], c='black'); axs[2].plot(sol[:,:4,1], c='r'); axs[4].plot(sol[:,:4,2], c='b');
axs[0].set_xticks([]); axs[2].set_xticks([]); axs[4].set_xticks([])
# The error accumulates as Euler (base solver of HyperEuler) does not converge here.
# Here we plot the errors (mean across batches of initial conditions)
axs[1].plot((sol-sol_gt)[:,:4,0].abs().mean(1), c='black')
axs[1].set_title('Error on $x_0$')
axs[3].plot((sol-sol_gt)[:,:4,0].abs().mean(1), c='r')
axs[3].set_title('Error on $x_1$')
axs[5].plot((sol-sol_gt)[:,:4,0].abs().mean(1), c='b');
axs[5].set_title('Error on $x_2$')
###Output
_____no_output_____
###Markdown
Train the Hypersolver We can train our hypersolver on residuals between ground-truth solution and base solver (in this case Euler)
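Concretely, with the $\Delta t^2$ scaling of the correction term above, the hypernetwork target is the scaled local residual of the Euler step (a sketch; notation mine):

$$r_n = \frac{x^{\text{gt}}(t_{n+1}) - \big(x^{\text{gt}}(t_n) + \Delta t\, f(t_n, x^{\text{gt}}(t_n))\big)}{\Delta t^2},$$

which matches `residuals = (X_next_gt - X_next) / dt**2` in the cells below, fitted with an MSE loss.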
###Code
base_solver = Euler()
X = sol_gt[:-1].reshape(-1, 3)
X_next_gt = sol_gt[1:].reshape(-1, 3)
# step forward (fixed-step, time-invariant system hence any `t` as first argument is fine) with base solver
dt = t_span[1] - t_span[0]
_, X_next, _ = base_solver.step(sys, X, 0., dt) # step returns a Tuple (k1, berr, sol). The first two values are used internally
# within `odeint`
residuals = (X_next_gt - X_next) / dt**2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.DataParallel(hypersolver, device_ids=[0,1]) # feel free to change here according to your setup and GPU available.
model = model.to(device)
X = X.to(device)
residuals = residuals.to(device)
# training will take a while...
opt = torch.optim.Adadelta(model.parameters(), lr=3e-4)
if dry_run: STEPS = 2
else: STEPS = 300000
loss_func = nn.MSELoss()
hypernet = model.module.hypernet
for k in range(STEPS):
residuals_hypersolver = hypernet(0., X)
loss = loss_func(residuals, residuals_hypersolver)
print(f'Step: {k}, Residual loss: {loss:.3f}', end='\r')
loss.backward(); opt.step(); opt.zero_grad()
# roughly twice as fast as a rk4 (3 orders higher). How does the accuracy compare to the base solver (and others)?
# sampling different initial conditions (off distribution to test generalization)
x0 = torch.randn(5, 3) + 14 # solve 5 IVPs in parallel!
hypersolver = hypersolver.to(device)
x0 = x0.to(device)
t0 = time.time()
t_eval, sol = odeint(sys, x0, t_span, solver=hypersolver)
sol = sol.detach().cpu()
hyper_sol_time = time.time() - t0
t0 = time.time()
t_eval, base_sol = odeint(sys, x0, t_span, solver='euler')
base_sol = base_sol.detach().cpu()
base_sol_time = time.time() - t0
t0 = time.time()
t_eval, rk4_sol = odeint(sys, x0, t_span, solver='rk4')
rk4_sol = rk4_sol.detach().cpu()
rk4_sol_time = time.time() - t0
t0 = time.time()
t_eval, dp5_low_sol = odeint(sys, x0, t_span, solver='dopri5', atol=1e-3, rtol=1e-3)
dp5_low_sol = dp5_low_sol.detach().cpu()
dp5_low_time = time.time() - t0
if dry_run: t_eval, sol_gt = odeint(sys, x0, t_span, solver='tsit5', atol=1e-2, rtol=1e-2)
else: t_eval, sol_gt = odeint(sys, x0, t_span, solver='tsit5', atol=1e-6, rtol=1e-6)
sol_gt = sol_gt.detach().cpu()
def smape(yhat, y):
return torch.abs(yhat - y) / (torch.abs(yhat) + torch.abs(y)) / 2
error_hyper = smape(sol_gt, sol).mean(1).sum(1)
error_base = smape(sol_gt, base_sol).mean(1).sum(1)
error_rk4 = smape(sol_gt, rk4_sol).mean(1).sum(1)
error_dp5 = smape(sol_gt, dp5_low_sol).mean(1).sum(1)
fig = plt.figure(figsize=(16,4))
ax = fig.add_subplot(1,4,1)
for i in range(4):
for j in range(3):
ax.plot(sol[:,i,j], c='r', alpha=0.4)
ax.plot(dp5_low_sol[:,i,j], c='black')
ax.plot(sol_gt[:,i,j], c='blue')
ax.legend(['HyperEuler', 'Other'])
ax = fig.add_subplot(1,4,2)
for i in range(4):
ax.plot(sol[:,i,0], sol[:,i,2], c='r')
ax.plot(dp5_low_sol[:,i,0], dp5_low_sol[:,i,2], c='black')
ax.plot(sol_gt[:,i,0], sol_gt[:,i,2], c='blue')
ax = fig.add_subplot(1,4,3)
ax.plot(error_hyper, c='r')
ax.plot(error_base, c='green')
ax.plot(error_rk4, c='orange')
ax.plot(error_dp5, c='black')
ax.set_yscale('log')
ax.legend(['HyperEuler', 'Euler', 'RK4', 'DP45(low)'])
ax.set_title('Errors, logscale')
times = [hyper_sol_time, base_sol_time, rk4_sol_time, dp5_low_time]
ax = fig.add_subplot(1,4,4)
colors = ['r', 'g', 'orange', 'black']
for k in range(4):
barlist = ax.bar(0 + k, times[k:k+1], color=colors[k], alpha=0.4, edgecolor='black')
ax.legend(['HyperEuler', 'Euler', 'RK4', 'DP45(low)'])
ax.set_title('Wall-clock sol time')
###Output
_____no_output_____ |
7_GaussianBetaDeeper.ipynb | ###Markdown
Exploring the Encodings / Decodings
###Code
mnist = MNIST(os.getcwd(), transform=ToTensor())
mnist = torch.stack([x.view(-1) for x, _ in mnist])
mnist.shape
with torch.no_grad():
z, z_mu, z_scale, x, x_mu, x_scale = model(mnist)
z_std = 1e-6 + F.softplus(z_scale)
x_std = 1e-6 + F.softplus(x_scale)
z.shape, x.shape
###Output
_____no_output_____
###Markdown
Histograms of z, z_mu, z_std
###Code
plt.figure(figsize=(16, 8))
plt.hist(z.view(-1), bins=100)
plt.title(f'Z values across whole MNIST training set (n={len(z.view(-1)):,})')
plt.show()
plt.figure(figsize=(16, 8))
plt.hist(z_mu.view(-1), bins=100)
plt.title(f'Z Mu values across whole MNIST training set (n={len(z_mu.view(-1)):,})')
plt.show()
plt.figure(figsize=(16, 8))
plt.hist(z_std.view(-1), bins=100)
plt.title(f'Z Std values across whole MNIST training set (n={len(z_std.view(-1)):,})')
plt.show()
###Output
_____no_output_____
###Markdown
Histograms of x (orig), x_mu, x_std
###Code
plt.figure(figsize=(16, 8))
plt.hist(x.view(-1), bins=20)
plt.title(f'X original values across whole MNIST training set (n={len(x.view(-1)):,})')
plt.show()
plt.figure(figsize=(16, 8))
plt.hist(x_mu.view(-1), bins=100)
plt.title(f'X Mu values across whole MNIST training set (n={len(x_mu.view(-1)):,})')
plt.show()
plt.figure(figsize=(16, 8))
plt.hist(x_std.view(-1), bins=100)
plt.title(f'X Std values across whole MNIST training set (n={len(x_std.view(-1)):,})')
plt.show()
plt.figure(figsize=(16, 8))
plt.hist([x_mu.view(-1).numpy(), x.view(-1).numpy()], bins=20, label=['x_mu', 'x (orig)'])
plt.title('X Mu values vs Original X values across MNIST train set')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Exploring X Dist
###Code
xdist = Normal(x_mu, 1e-6 + F.softplus(x_scale))
xdist
r = xdist.sample()
r.shape, r.min(), r.max()
plt.imshow(r[0].reshape(28, 28))
plt.show()
plt.figure(figsize=(16, 8))
plt.hist(r.view(-1).numpy(), bins=100)
plt.show()
###Output
_____no_output_____
###Markdown
Experiments with Artificial Z Vectors
###Code
with torch.no_grad():
z_zero_mu, z_zero_scale = model.decode(torch.zeros(20))
z_zero_std = 1e-6 + F.softplus(z_zero_scale)
plt.figure(figsize=(16, 8))
plt.hist([z_zero_mu.numpy(), z_zero_std.numpy()], bins=20, label=['mu', 'std'])
plt.title("Mu and sigma of decodings for zero vector")
plt.legend()
plt.show()
plt.imshow(z_zero_mu.numpy().reshape(28, 28))
plt.show()
with torch.no_grad():
z_one_mu, z_one_scale = model.decode(torch.ones(20))
z_one_std = 1e-6 + F.softplus(z_one_scale)
plt.figure(figsize=(16, 8))
plt.hist([z_one_mu.numpy(), z_one_std.numpy()], bins=20, label=['mu', 'std'])
plt.title("Mu and sigma of decodings for ones vector")
plt.legend()
plt.show()
plt.imshow(z_one_mu.numpy().reshape(28, 28))
plt.show()
with torch.no_grad():
z_100_mu, z_100_scale = model.decode(torch.zeros(20) + 100)
z_100_std = 1e-6 + F.softplus(z_100_scale)
plt.figure(figsize=(16, 8))
plt.hist([z_100_mu.numpy(), z_100_std.numpy()], bins=20, label=['mu', 'std'])
plt.title("Mean / Std of Decodings for 100 Vector")
plt.legend()
plt.show()
plt.imshow(z_100_mu.numpy().reshape(28, 28))
plt.show()
with torch.no_grad():
z_n100_mu, z_n100_scale = model.decode(torch.zeros(20) - 100)
z_n100_std = 1e-6 + F.softplus(z_n100_scale)
plt.figure(figsize=(16, 8))
plt.hist([z_n100_mu.numpy(), z_n100_std.numpy()], bins=20, label=['mu', 'std'])
plt.title("Mean / Std of Decodings for -100 Vector")
plt.legend()
plt.show()
plt.imshow(z_n100_mu.numpy().reshape(28, 28))
plt.show()
plt.figure(figsize=(16, 8))
plt.hist([z_zero_mu.numpy(), z_one_mu.numpy()], bins=20, label=['zeros', 'ones'])
plt.title('Histogram of x_mu values generated from latent vectors of all zeros and all ones')
plt.legend()
plt.show()
with torch.no_grad():
ndist = torch.linspace(-3, 3, steps=10).expand(20, -1).T
dec_mu, dec_std = model.decode(ndist)
    dec_std = 1e-6 + F.softplus(dec_std)
dec_mu.shape, dec_std.shape
plt.figure(figsize=(16, 8))
for val, x_ in zip(ndist, dec_mu.numpy()):
plt.hist(x_, bins=20, histtype='step', label=val[0].item())
plt.title('Histograms of x_mu values generated by latent vectors of a single value drawn linearly from -3.0 to 3.0')
plt.legend()
plt.show()
plt.figure(figsize=(16, 8))
for i, (val, x_) in enumerate(zip(ndist, dec_mu.numpy()), start=1):
plt.subplot(2, 5, i)
plt.imshow(x_.reshape(28, 28))
plt.xticks([])
plt.yticks([])
plt.title(f'{val[0].item():0.2f}')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Look at Model Weights
###Code
model
with torch.no_grad():
w = model.z_loc.weight.clone()
plt.figure(figsize=(16, 8))
plt.hist(w.view(-1), bins=100)
plt.show()
with torch.no_grad():
e0 = model.encoder[0].weight.view(-1)
e3 = model.encoder[3].weight.view(-1)
em = model.z_loc.weight.view(-1)
es = model.z_scale.weight.view(-1)
ew = torch.cat((e0, e3, em, es))
ew.shape
plt.figure(figsize=(16, 8))
plt.hist(ew, bins=100)
plt.title(f'Encoder + z_loc / z_scale layer weights, n={len(ew):,}')
plt.show()
with torch.no_grad():
d0 = model.decoder[0].weight.view(-1)
d3 = model.decoder[3].weight.view(-1)
dm = model.x_loc.weight.view(-1)
ds = model.x_scale.weight.view(-1)
dw = torch.cat((d0, d3, dm, ds))
dw.shape
plt.figure(figsize=(16, 8))
plt.hist(dw, bins=100)
plt.title(f'Decoder + x_loc / x_scale layer weights, n={len(dw):,}')
plt.show()
###Output
_____no_output_____ |
examples/Notebooks/flopy3_ZoneBudget_example.ipynb | ###Markdown
FloPy ZoneBudget Example

This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.

First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer. If you want to run this notebook, you have to set the path to your own flopy path.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones

Using the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, :, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget Object

At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units

The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias Names

A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame

Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times.

The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget.

NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV

We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join(loadpth, 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
    with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY 0.000159 0.000019 0.000115
###Markdown
Net Budget

Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget Components

The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget ExampleThis notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell by cell budget file using an array of zones.First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer. If you want to run this notebook, you have to set the path to your own flopy path.
###Code
%matplotlib inline
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing ZonesUsing the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, :, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
np.unique(zon)
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget ObjectAt the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert UnitsThe `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias NamesA dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrameSet `kstpkper` and `totim` keyword args to `None` (or omit) to return all times.The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget.NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of reocrds. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSVWe can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join(loadpth, 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
with open(fname, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 6222.670000 18.062900 36.125800
1.0 0 0 FROM_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.000000 4275.260000 491.945000
1.0 0 0 FROM_ZONE_2 2744.820000 0.000000 2115.650000
1.0 0 0 FROM_ZONE_3 451.546000 1215.950000 0.000000
1.0 0 0 TOTAL_IN 9419.040000 5509.270000 2643.730000
1.0 0 0 TO_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 821.283000 648.806000 976.234000
1.0 0 0 TO_WELLS 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.000000 2744.820000 451.546000
1.0 0 0 TO_ZONE_2 4275.260000 0.000000 1215.950000
1.0 0 0 TO_ZONE_3 491.945000 2115.650000 0.000000
1.0 0 0 TOTAL_OUT 9420.640000 5509.280000 2643.730000
1.0 0 0 IN-OUT 1.594730 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.000000 231.567000 86.217200
1097.0 0 1096 FROM_WELLS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 5145.580000 14.936400 29.872800
1097.0 0 1096 FROM_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.000000 3475.120000 138.600000
1097.0 0 1096 FROM_ZONE_2 3269.320000 0.000000 1764.660000
1097.0 0 1096 FROM_ZONE_3 192.186000 1528.050000 0.000000
1097.0 0 1096 TOTAL_IN 8607.090000 5249.670000 2019.350000
1097.0 0 1096 TO_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 230.548000 215.702000 299.114000
1097.0 0 1096 TO_WELLS 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.000000 3269.320000 192.186000
1097.0 0 1096 TO_ZONE_2 3475.120000 0.000000 1528.050000
1097.0 0 1096 TO_ZONE_3 138.600000 1764.660000 0.000000
1097.0 0 1096 TOTAL_OUT 8607.070000 5249.680000 2019.350000
1097.0 0 1096 IN-OUT 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY 0.000159 0.000019 0.000115
###Markdown
Net BudgetUsing the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), np.int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget ComponentsThe following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget ExampleThis notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer; otherwise, set the path to your own flopy installation before running this notebook.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing ZonesUsing the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget ObjectAt the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert UnitsThe `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
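# The same operator support can be chained further; e.g., a hypothetical
# weekly budget via cmd * 7, assuming multiplication is supported in the
# same way as the division shown above.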
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
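# The arithmetic above returned new ZoneBudget objects rather than
# modifying cmd in place, so the identity check below evaluates to False.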
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias NamesA dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
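# Zones without an alias (here zone 0) keep the default 'ZONE_X' field name.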
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrameSet `kstpkper` and `totim` keyword args to `None` (or omit) to return all times.The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget.NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both the inflow and outflow records.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
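# 'CONSTANT_HEAD' carries no FROM_/TO_ prefix, so both the inflow and
# the outflow constant-head records are returned.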
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
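# TO_WELLS is an outflow record; flipping its sign gives pumping and
# recharge a consistent sign convention for the ratio computed below.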
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
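# totim is interpreted as elapsed time in 'timeunit' units ('D' = days)
# measured from start_datetime, which yields the datetime multi-index
# sliced below.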
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSVWe can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
    with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net BudgetUsing the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget ComponentsThe following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget ExampleThis notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer; otherwise, set the path to your own flopy installation before running this notebook.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing ZonesUsing the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget ObjectAt the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert UnitsThe `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias NamesA dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrameSet `kstpkper` and `totim` keyword args to `None` (or omit) to return all times.The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget.NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both the inflow and outflow records.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSVWe can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
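# The CSV holds one row per (totim, record name) with a column per zone,
# matching the table printed below.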
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
    with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net BudgetUsing the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget ComponentsThe following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget ExampleThis notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer; otherwise, set the path to your own flopy installation before running this notebook.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join("..", ".."))
sys.path.append(fpth)
import flopy
print(sys.version)
print("numpy version: {}".format(np.__version__))
print("matplotlib version: {}".format(mpl.__version__))
print("pandas version: {}".format(pd.__version__))
print("flopy version: {}".format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join("..", "data", "zonbud_examples")
cbc_f = os.path.join(loadpth, "freyberg.gitcbc")
###Output
_____no_output_____
###Markdown
Read File Containing ZonesUsing the `ZoneBudget.read_zone_file()` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import ZoneBudget
zone_file = os.path.join(loadpth, "zonef_mlt.zbr")
zon = ZoneBudget.read_zone_file(zone_file)
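# The zone file is read into a 3-D integer array of shape (nlay, nrow, ncol).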
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay + 1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect("equal")
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget ObjectAt the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ["FROM_RECHARGE", "FROM_ZONE_1", "FROM_ZONE_3"]
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ["FROM_RECHARGE", "FROM_ZONE_1", "FROM_ZONE_3"]
zones = ["ZONE_2"]
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ["TOTAL_IN", "TOTAL_OUT", "IN-OUT", "PERCENT_DISCREPANCY"]
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert UnitsThe `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ["FROM_RECHARGE"]
rowidx = np.in1d(cmdbud["name"], names)
colidx = "ZONE_1"
print("{:,.1f} cubic meters/day".format(cmdbud[rowidx][colidx][0]))
print("{:,.1f} cubic feet/day".format(cfdbud[rowidx][colidx][0]))
print("{:,.1f} inches/year".format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias NamesA dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: "SURF", 2: "CONF", 3: "UFA"}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.0], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrameSet `kstpkper` and `totim` keyword args to `None` (or omit) to return all times.The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: "SURF", 2: "CONF", 3: "UFA"}
times = list(range(1092, 1097 + 1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget.NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both the inflow and outflow records.
###Code
dateidx1 = 1095.0
dateidx2 = 1097.0
names = ["FROM_RECHARGE", "TO_WELLS", "CONSTANT_HEAD"]
zones = ["SURF", "CONF"]
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.0
dateidx2 = 1097.0
zones = ["SURF"]
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ["FROM_RECHARGE"]), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ["TO_WELLS"]), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(["totim"])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(["totim"])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.0
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp("1972-12-29")
dateidx2 = pd.Timestamp("1972-12-30")
names = ["FROM_RECHARGE", "TO_WELLS", "CONSTANT_HEAD"]
zones = ["SURF", "CONF"]
df = zb.get_dataframes(start_datetime="1970-01-01", timeunit="D", names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key="kstpkper")
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSVWe can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join("data", "Example_output.csv")
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
    with open(f_out, "r") as f:
for line in f.readlines():
print("\t".join(line.split(",")))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net BudgetUsing the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: "SURF", 2: "CONF", 3: "UFA"}
times = list(range(1092, 1097 + 1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True)
df = zb.get_dataframes(
names=["STORAGE", "WELLS"], zones=["SURF", "UFA"], net=True
)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget ComponentsThe following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return "{:,.0f}".format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if "ax" in kwargs:
ax = kwargs.pop("ax")
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align="center", alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align="center", alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(
mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep)
)
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < 0.33:
ymin = -(ymax * 0.5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = "{:,.0f}".format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (0.02 * ymax)
vertical_alignment = "bottom"
horizontal_alignment = "center"
ax.text(
x,
y,
label,
ha=horizontal_alignment,
va=vertical_alignment,
rotation=90,
)
for i, rect in enumerate(rects_out):
label = "{:,.0f}".format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (0.02 * ymin)
vertical_alignment = "top"
horizontal_alignment = "center"
ax.text(
x,
y,
label,
ha=horizontal_alignment,
va=vertical_alignment,
rotation=90,
)
# horizontal line indicating zero
ax.plot(
[
rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width(),
],
[0, 0],
"k",
)
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2.0, 500.0, 1000.0, 1095.0]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(
cbc_f, zon, kstpkper=None, totim=t, aliases=aliases
)
recname = "STORAGE"
values_in = zb.get_dataframes(names="FROM_{}".format(recname)).T.squeeze()
values_out = (
zb.get_dataframes(names="TO_{}".format(recname)).T.squeeze() * -1
)
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(
values_in, values_out, labels, ax=ax
)
plt.ylabel("Volumetric rate, in Mgal/d")
plt.title("{} @ totim = {}".format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
ZoneBudget for MODFLOW 6 (`ZoneBudget6`)This section shows how to build and run a zone budget when working with a MODFLOW 6 model. First, let's load a model.
###Code
mf6_exe = "mf6"
zb6_exe = "zbud6"
if platform.system().lower() == "windows":
mf6_exe += ".exe"
zb6_exe += ".exe"
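# On Windows the executables carry an .exe suffix; elsewhere the bare
# names are used (assumed to be discoverable on the system path).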
sim_ws = os.path.join("..", "data", "mf6-freyberg")
cpth = os.path.join(".", "temp")
sim = flopy.mf6.MFSimulation.load(sim_ws=sim_ws, exe_name=mf6_exe)
sim.simulation_data.mfpath.set_sim_path(cpth)
sim.write_simulation()
sim.run_simulation();
###Output
loading simulation...
loading simulation name file...
loading tdis package...
loading model gwf6...
loading package dis...
loading package ic...
WARNING: Block "options" is not a valid block name for file type ic.
loading package oc...
loading package npf...
loading package sto...
loading package chd...
loading package riv...
loading package wel...
loading package rch...
loading ims package freyberg...
WARNING: MFFileMgt's set_sim_path has been deprecated. Please use MFSimulation's set_sim_path in the future.
writing simulation...
writing simulation name file...
writing simulation tdis package...
writing ims package freyberg...
writing model freyberg...
writing model name file...
writing package dis...
writing package ic...
writing package oc...
writing package npf...
writing package sto...
writing package chd_0...
writing package riv_0...
writing package wel_0...
writing package rch_0...
FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/mf6
MODFLOW 6
U.S. GEOLOGICAL SURVEY MODULAR HYDROLOGIC MODEL
VERSION 6.2.2 07/30/2021
***DEVELOP MODE***
MODFLOW 6 compiled Aug 31 2021 16:00:07 with GFORTRAN compiler (ver. 10.3.0)
This software has been approved for release by the U.S. Geological
Survey (USGS). Although the software has been subjected to rigorous
review, the USGS reserves the right to update the software as needed
pursuant to further analysis and review. No warranty, expressed or
implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the
fact of release constitute any such warranty. Furthermore, the
software is released on condition that neither the USGS nor the U.S.
Government shall be held liable for any damages resulting from its
authorized or unauthorized use. Also refer to the USGS Water
Resources Software User Rights Notice for complete use, copyright,
and distribution information.
Run start date and time (yyyy/mm/dd hh:mm:ss): 2021/10/25 16:51:56
Writing simulation list file: mfsim.lst
Using Simulation name file: mfsim.nam
Solving: Stress period: 1 Time step: 1
Run end date and time (yyyy/mm/dd hh:mm:ss): 2021/10/25 16:51:56
Elapsed run time: 0.078 Seconds
Normal termination of simulation.
###Markdown
Use the `.output` model attribute to create a zonebudget modelThe `.output` attribute allows the user to access model output and create zonebudget models easily. The user only needs to pass in a zone array to create a zonebudget model!
###Code
# let's get our idomain array from the model, split it into two zones, and use it as a zone array
ml = sim.get_model("freyberg")
zones = ml.modelgrid.idomain
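# In rows 20 onward of the top layer, active cells (idomain != 0) are
# assigned to zone 2; inactive cells remain zone 0.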
zones[0, 20:] = np.where(zones[0, 20:] != 0, 2, 0)
plt.imshow(zones[0])
plt.colorbar();
# now let's build a zonebudget model and run it!
zonbud = ml.output.zonebudget(zones)
zonbud.change_model_ws(cpth)
zonbud.write_input()
zonbud.run_model(exe_name=zb6_exe);
###Output
FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/zbud6
ZONEBUDGET Version 6
U.S. GEOLOGICAL SURVEY
VERSION 6.2.2 07/30/2021
.........
Normal Termination
###Markdown
Getting the zonebudget outputWe can then get the output as a recarray using the `.get_budget()` method or as a pandas dataframe using the `.get_dataframes()` method.
###Code
zonbud.get_budget()
# get the net flux using net=True flag
zonbud.get_dataframes(net=True)
# we can also pivot the data into a spreadsheet-like format
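# (assumed layout: one column per record name, one row per zone and time)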
zonbud.get_dataframes(net=True, pivot=True)
# or get a volumetric budget by supplying modeltime
mt = ml.modeltime
# budget recarray must be pivoted to get volumetric budget!
zonbud.get_volumetric_budget(
mt, recarray=zonbud.get_budget(net=True, pivot=True)
)
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget ExampleThis notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer; otherwise, set the path to your own flopy installation before running this notebook.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing ZonesUsing the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:2944: PendingDeprecationWarning: Deprecation planned for version 3.3.5, use ZoneBudget.read_zone_file()
warnings.warn(
###Markdown
Extract Budget Information from ZoneBudget ObjectAt the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert UnitsThe `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias NamesA dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrameSet `kstpkper` and `totim` keyword args to `None` (or omit) to return all times.The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget.NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both the inflow and outflow records.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSVWe can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
    with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net BudgetUsing the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget ComponentsThe following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
ZoneBudget for MODFLOW 6 (`ZoneBudget6`)This section shows how to build and run a zone budget when working with a MODFLOW 6 model. First, let's load a model.
###Code
mf6_exe = "mf6"
zb6_exe = "zbud6"
if platform.system().lower() == "windows":
mf6_exe += ".exe"
zb6_exe += ".exe"
sim_ws = os.path.join('..', 'data', 'mf6-freyberg')
cpth = os.path.join(".", "temp")
sim = flopy.mf6.MFSimulation.load(sim_ws=sim_ws, exe_name=mf6_exe)
sim.simulation_data.mfpath.set_sim_path(cpth)
sim.write_simulation()
sim.run_simulation();
###Output
loading simulation...
loading simulation name file...
loading tdis package...
loading model gwf6...
loading package dis...
loading package ic...
WARNING: Block "options" is not a valid block name for file type ic.
loading package oc...
loading package npf...
loading package sto...
loading package chd...
loading package riv...
loading package wel...
loading package rch...
loading ims package freyberg...
writing simulation...
writing simulation name file...
writing simulation tdis package...
writing ims package freyberg...
writing model freyberg...
writing model name file...
writing package dis...
writing package ic...
writing package oc...
writing package npf...
writing package sto...
writing package chd_0...
writing package riv_0...
writing package wel_0...
writing package rch_0...
FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/mf6
MODFLOW 6
U.S. GEOLOGICAL SURVEY MODULAR HYDROLOGIC MODEL
VERSION 6.2.1 02/18/2021
***DEVELOP MODE***
MODFLOW 6 compiled Jul 15 2021 11:13:51 with GFORTRAN compiler (ver. 10.3.0)
This software has been approved for release by the U.S. Geological
Survey (USGS). Although the software has been subjected to rigorous
review, the USGS reserves the right to update the software as needed
pursuant to further analysis and review. No warranty, expressed or
implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the
fact of release constitute any such warranty. Furthermore, the
software is released on condition that neither the USGS nor the U.S.
Government shall be held liable for any damages resulting from its
authorized or unauthorized use. Also refer to the USGS Water
Resources Software User Rights Notice for complete use, copyright,
and distribution information.
Run start date and time (yyyy/mm/dd hh:mm:ss): 2021/07/28 15:25:24
Writing simulation list file: mfsim.lst
Using Simulation name file: mfsim.nam
Solving: Stress period: 1 Time step: 1
Run end date and time (yyyy/mm/dd hh:mm:ss): 2021/07/28 15:25:24
Elapsed run time: 0.081 Seconds
Normal termination of simulation.
###Markdown
Use the `.output` model attribute to create a zonebudget modelThe `.output` attribute allows the user to access model output and create zonebudget models easily. The user only needs to pass in a zone array to create a zonebudget model!
###Code
# let's get our idomain array from the model, split it into two zones, and use it as a zone array
ml = sim.get_model('freyberg')
zones = ml.modelgrid.idomain
zones[0, 20:] = np.where(zones[0, 20:] != 0, 2, 0)
plt.imshow(zones[0])
plt.colorbar();
# now let's build a zonebudget model and run it!
zonbud = ml.output.zonebudget(zones)
zonbud.change_model_ws(cpth)
zonbud.write_input()
zonbud.run_model(exe_name=zb6_exe);
###Output
FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/zbud6
ZONEBUDGET Version 6
U.S. GEOLOGICAL SURVEY
VERSION 6.2.1 02/18/2021
.........
Normal Termination
###Markdown
Getting the zonebudget outputWe can then get the output as a recarray using the `.get_budget()` method or as a pandas dataframe using the `.get_dataframes()` method.
###Code
zonbud.get_budget()
# get the net flux using net=True flag
zonbud.get_dataframes(net=True)
# we can also pivot the data into a spreadsheet-like format
zonbud.get_dataframes(net=True, pivot=True)
# or get a volumetric budget by supplying modeltime
mt = ml.modeltime
# budget recarray must be pivoted to get volumetric budget!
zonbud.get_volumetric_budget(mt, recarray=zonbud.get_budget(net=True, pivot=True))
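# sketch (assumption, not in the original notebook): get_volumetric_budget
# appears to return a pandas DataFrame, so it could be saved like any frame:
# vb = zonbud.get_volumetric_budget(mt, recarray=zonbud.get_budget(net=True, pivot=True))
# vb.to_csv(os.path.join(cpth, 'zb_volumetric.csv'))  # hypothetical file name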
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget Example
This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.
First, set the path and import the required packages. The flopy path doesn't have to be set if you installed flopy from a binary installer; otherwise, set the path to your own flopy installation.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except ImportError:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones
Using the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget Object
At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units
The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias Names
A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame
Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times. The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
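# sketch (not in the original cell): per the markdown above, omitting
# kstpkper/totim (or passing None) would return budgets for every time:
# zb_all = flopy.utils.ZoneBudget(cbc_f, zon, aliases=aliases)
# zb_all.get_dataframes()  # one row per (totim, name) pair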
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget. NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV
We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except Exception:
with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net Budget
Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget Components
The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
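# pad the y-limits below so the rotated bar value labels fit inside the axes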
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget Example
This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.
First, set the path and import the required packages. The flopy path doesn't have to be set if you installed flopy from a binary installer; otherwise, set the path to your own flopy installation.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except ImportError:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones
Using the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, :, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget Object
At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units
The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
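# sketch (not in the original cell): other arithmetic operators behave the
# same way and also return a new object, e.g. converting daily rates to
# per-second rates (86400 seconds per day; 'cps' is a hypothetical name):
# cps = cfd / 86400.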
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias Names
A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame
Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times. The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget. NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV
We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except Exception:
with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY 0.000159 0.000019 0.000115
###Markdown
Net Budget
Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget Components
The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget Example
This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.
First, set the path and import the required packages. The flopy path doesn't have to be set if you installed flopy from a binary installer; otherwise, set the path to your own flopy installation.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except ImportError:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones
Using the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
/Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy/utils/zonbud.py:2944: PendingDeprecationWarning: Deprecation planned for version 3.3.5, use ZoneBudget.read_zone_file()
warnings.warn(
###Markdown
Extract Budget Information from ZoneBudget Object
At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units
The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias Names
A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame
Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times. The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget. NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
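# sketch (not in the original cell): pandas IndexSlice gives an equivalent,
# arguably clearer selection over the (totim, name) multi-index:
# idx = pd.IndexSlice
# df.loc[idx[dateidx1:dateidx2, :], zones]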
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV
We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except Exception:
with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net Budget
Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget Components
The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Zonebudget for Modflow 6 (`ZoneBudget6`)
This section shows how to build and run a Zonebudget when working with a MODFLOW 6 model. First, let's load a model.
###Code
mf6_exe = "mf6"
zb6_exe = "zbud6"
if platform.system().lower() == "windows":
mf6_exe += ".exe"
zb6_exe += ".exe"
sim_ws = os.path.join('..', 'data', 'mf6-freyberg')
cpth = os.path.join(".", "temp")
sim = flopy.mf6.MFSimulation.load(sim_ws=sim_ws, exe_name=mf6_exe)
sim.simulation_data.mfpath.set_sim_path(cpth)
sim.write_simulation()
sim.run_simulation();
###Output
loading simulation...
loading simulation name file...
loading tdis package...
loading model gwf6...
loading package dis...
loading package ic...
WARNING: Block "options" is not a valid block name for file type ic.
loading package oc...
loading package npf...
loading package sto...
loading package chd...
loading package riv...
loading package wel...
loading package rch...
loading ims package freyberg...
WARNING: MFFileMgt's set_sim_path has been deprecated. Please use MFSimulation's set_sim_path in the future.
writing simulation...
writing simulation name file...
writing simulation tdis package...
writing ims package freyberg...
writing model freyberg...
writing model name file...
writing package dis...
writing package ic...
writing package oc...
writing package npf...
writing package sto...
writing package chd_0...
writing package riv_0...
writing package wel_0...
writing package rch_0...
FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/mf6
###Markdown
Use the `.output` model attribute to create a zonebudget model
The `.output` attribute allows the user to access model output and create zonebudget models easily. The user only needs to pass in a zone array to create a zonebudget model!
###Code
# let's get our idomain array from the model, split it into two zones, and use it as a zone array
ml = sim.get_model('freyberg')
zones = ml.modelgrid.idomain
zones[0, 20:] = np.where(zones[0, 20:] != 0, 2, 0)
plt.imshow(zones[0])
plt.colorbar();
# now let's build a zonebudget model and run it!
zonbud = ml.output.zonebudget(zones)
zonbud.change_model_ws(cpth)
zonbud.write_input()
zonbud.run_model(exe_name=zb6_exe);
###Output
FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/zbud6
ZONEBUDGET Version 6
U.S. GEOLOGICAL SURVEY
VERSION 6.2.2 07/30/2021
.........
Normal Termination
###Markdown
Getting the zonebudget output
We can then get the output as a recarray using the `.get_budget()` method or as a pandas dataframe using the `.get_dataframes()` method.
###Code
zonbud.get_budget()
# get the net flux using net=True flag
zonbud.get_dataframes(net=True)
# we can also pivot the data into a spreadsheet-like format
zonbud.get_dataframes(net=True, pivot=True)
# or get a volumetric budget by supplying modeltime
mt = ml.modeltime
# budget recarray must be pivoted to get volumetric budget!
zonbud.get_volumetric_budget(mt, recarray=zonbud.get_budget(net=True, pivot=True))
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget Example
This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.
First, set the path and import the required packages. The flopy path doesn't have to be set if you installed flopy from a binary installer; otherwise, set the path to your own flopy installation.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except ImportError:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones
Using the `ZoneBudget.read_zone_file()` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import ZoneBudget
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = ZoneBudget.read_zone_file(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget Object
At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units
The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias Names
A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame
Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times. The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget. NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
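# sketch (assumption about flopy's internal mapping, not in the original):
# with timeunit='D' the datetime index level should be equivalent to
# pd.to_datetime('1970-01-01') + pd.to_timedelta(totim_values, unit='D'),
# where totim_values is a hypothetical array of the totim level values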
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV
We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except Exception:
with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net Budget
Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
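# sketch (not in the original cell): omitting net (assuming the default is
# False) would return separate FROM_/TO_ records for the same names instead
# of a single net value:
# zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'])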
###Output
_____no_output_____
###Markdown
Plot Budget Components
The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Zonebudget for Modflow 6 (`ZoneBudget6`)
This section shows how to build and run a Zonebudget when working with a MODFLOW 6 model. First, let's load a model.
###Code
mf6_exe = "mf6"
zb6_exe = "zbud6"
if platform.system().lower() == "windows":
mf6_exe += ".exe"
zb6_exe += ".exe"
sim_ws = os.path.join('..', 'data', 'mf6-freyberg')
cpth = os.path.join(".", "temp")
sim = flopy.mf6.MFSimulation.load(sim_ws=sim_ws, exe_name=mf6_exe)
sim.simulation_data.mfpath.set_sim_path(cpth)
sim.write_simulation()
sim.run_simulation();
###Output
loading simulation...
loading simulation name file...
loading tdis package...
loading model gwf6...
loading package dis...
loading package ic...
WARNING: Block "options" is not a valid block name for file type ic.
loading package oc...
loading package npf...
loading package sto...
loading package chd...
loading package riv...
loading package wel...
loading package rch...
loading ims package freyberg...
WARNING: MFFileMgt's set_sim_path has been deprecated. Please use MFSimulation's set_sim_path in the future.
writing simulation...
writing simulation name file...
writing simulation tdis package...
writing ims package freyberg...
writing model freyberg...
writing model name file...
writing package dis...
writing package ic...
writing package oc...
writing package npf...
writing package sto...
writing package chd_0...
writing package riv_0...
writing package wel_0...
writing package rch_0...
FloPy is using the following executable to run the model: .\mf6.exe
MODFLOW 6
U.S. GEOLOGICAL SURVEY MODULAR HYDROLOGIC MODEL
VERSION 6.2.1 02/18/2021
MODFLOW 6 compiled Feb 18 2021 21:14:51 with IFORT compiler (ver. 19.10.3)
This software has been approved for release by the U.S. Geological
Survey (USGS). Although the software has been subjected to rigorous
review, the USGS reserves the right to update the software as needed
pursuant to further analysis and review. No warranty, expressed or
implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the
fact of release constitute any such warranty. Furthermore, the
software is released on condition that neither the USGS nor the U.S.
Government shall be held liable for any damages resulting from its
authorized or unauthorized use. Also refer to the USGS Water
Resources Software User Rights Notice for complete use, copyright,
and distribution information.
Run start date and time (yyyy/mm/dd hh:mm:ss): 2021/08/20 17:28:52
Writing simulation list file: mfsim.lst
Using Simulation name file: mfsim.nam
Solving: Stress period: 1 Time step: 1
Run end date and time (yyyy/mm/dd hh:mm:ss): 2021/08/20 17:28:52
Elapsed run time: 0.124 Seconds
Normal termination of simulation.
###Markdown
Use the `.output` model attribute to create a zonebudget model
The `.output` attribute allows the user to access model output and create zonebudget models easily. The user only needs to pass in a zone array to create a zonebudget model!
###Code
# let's get our idomain array from the model, split it into two zones, and use it as a zone array
ml = sim.get_model('freyberg')
zones = ml.modelgrid.idomain
zones[0, 20:] = np.where(zones[0, 20:] != 0, 2, 0)
plt.imshow(zones[0])
plt.colorbar();
# now let's build a zonebudget model and run it!
zonbud = ml.output.zonebudget(zones)
zonbud.change_model_ws(cpth)
zonbud.write_input()
zonbud.run_model(exe_name=zb6_exe);
###Output
FloPy is using the following executable to run the model: .\zbud6.exe
ZONEBUDGET Version 6
U.S. GEOLOGICAL SURVEY
VERSION 6.2.1 02/18/2021
.........
Normal Termination
###Markdown
Getting the zonebudget output
We can then get the output as a recarray using the `.get_budget()` method or as a pandas dataframe using the `.get_dataframes()` method.
###Code
zonbud.get_budget()
# get the net flux using net=True flag
zonbud.get_dataframes(net=True)
# we can also pivot the data into a spreadsheet-like format
zonbud.get_dataframes(net=True, pivot=True)
# or get a volumetric budget by supplying modeltime
mt = ml.modeltime
# budget recarray must be pivoted to get volumetric budget!
zonbud.get_volumetric_budget(mt, recarray=zonbud.get_budget(net=True, pivot=True))
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget Example
This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones.
First, set the path and import the required packages. The flopy path doesn't have to be set if you installed flopy from a binary installer; otherwise, set the path to your own flopy installation.
###Code
%matplotlib inline
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except ImportError:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones
Using the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
np.unique(zon)
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget Object
At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units
The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
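# expected to print False: the division above produced a new ZoneBudget
# object instead of modifying cmd in place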
###Output
_____no_output_____
###Markdown
Alias Names
A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame
Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times. The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
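# as noted above, omitting both kstpkper and totim returns every available
# time (sketch):
# zb_all = flopy.utils.ZoneBudget(cbc_f, zon, aliases=aliases)
# zb_all.get_dataframes().head()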
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget. NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
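# 'CONSTANT_HEAD' carries no FROM_/TO_ prefix, so both directions are returned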
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV
We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY 0.000159 0.000019 0.000115
###Markdown
Net Budget
Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)  # np.int is deprecated; use the builtin int
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget Components
The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
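# draw inflows as positive bars and outflows as negative bars, one pair per
# zone, labeling each bar with its comma-separated value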
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget Example
This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones. First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer. If you want to run this notebook, you have to set the path to your own flopy path.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones
Using the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, :, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
np.unique(zon)
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget Object
At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units
The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias Names
A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame
Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times. The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget. NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV
We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY 0.000159 0.000019 0.000115
###Markdown
Net Budget
Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)  # np.int is deprecated; use the builtin int
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget Components
The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
FloPy ZoneBudget Example
This notebook demonstrates how to use the `ZoneBudget` class to extract budget information from the cell-by-cell budget file using an array of zones. First set the path and import the required packages. The flopy path doesn't have to be set if you install flopy from a binary installer. If you want to run this notebook, you have to set the path to your own flopy path.
###Code
import os
import sys
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('pandas version: {}'.format(pd.__version__))
print('flopy version: {}'.format(flopy.__version__))
# Set path to example datafiles
loadpth = os.path.join('..', 'data', 'zonbud_examples')
cbc_f = os.path.join(loadpth, 'freyberg.gitcbc')
###Output
_____no_output_____
###Markdown
Read File Containing Zones
Using the `read_zbarray` utility, we can import zonebudget-style array files.
###Code
from flopy.utils import read_zbarray
zone_file = os.path.join(loadpth, 'zonef_mlt.zbr')
zon = read_zbarray(zone_file)
nlay, nrow, ncol = zon.shape
fig = plt.figure(figsize=(10, 4))
for lay in range(nlay):
ax = fig.add_subplot(1, nlay, lay+1)
im = ax.pcolormesh(zon[lay, ::-1, :])
cbar = plt.colorbar(im)
plt.gca().set_aspect('equal')
plt.show()
###Output
_____no_output_____
###Markdown
Extract Budget Information from ZoneBudget Object
At the core of the `ZoneBudget` object is a numpy structured array. The class provides some wrapper functions to help us interrogate the array and save it to disk.
###Code
# Create a ZoneBudget object and get the budget record array
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 1096))
zb.get_budget()
# Get a list of the unique budget record names
zb.get_record_names()
# Look at a subset of fluxes
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zb.get_budget(names=names)
# Look at fluxes in from zone 2
names = ['FROM_RECHARGE', 'FROM_ZONE_1', 'FROM_ZONE_3']
zones = ['ZONE_2']
zb.get_budget(names=names, zones=zones)
# Look at all of the mass-balance records
names = ['TOTAL_IN', 'TOTAL_OUT', 'IN-OUT', 'PERCENT_DISCREPANCY']
zb.get_budget(names=names)
###Output
_____no_output_____
###Markdown
Convert Units
The `ZoneBudget` class supports the use of mathematical operators and returns a new copy of the object.
###Code
cmd = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=(0, 0))
cfd = cmd / 35.3147
inyr = (cfd / (250 * 250)) * 365 * 12
cmdbud = cmd.get_budget()
cfdbud = cfd.get_budget()
inyrbud = inyr.get_budget()
names = ['FROM_RECHARGE']
rowidx = np.in1d(cmdbud['name'], names)
colidx = 'ZONE_1'
print('{:,.1f} cubic meters/day'.format(cmdbud[rowidx][colidx][0]))
print('{:,.1f} cubic feet/day'.format(cfdbud[rowidx][colidx][0]))
print('{:,.1f} inches/year'.format(inyrbud[rowidx][colidx][0]))
cmd is cfd
###Output
_____no_output_____
###Markdown
Alias Names
A dictionary of {zone: "alias"} pairs can be passed to replace the typical "ZONE_X" fieldnames of the `ZoneBudget` structured array with more descriptive names.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=[1097.], aliases=aliases)
zb.get_budget()
###Output
_____no_output_____
###Markdown
Return the Budgets as a Pandas DataFrame
Set `kstpkper` and `totim` keyword args to `None` (or omit) to return all times. The `get_dataframes()` method will return a DataFrame multi-indexed on `totim` and `name`.
###Code
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_dataframes()
###Output
_____no_output_____
###Markdown
Slice the multi-index dataframe to retrieve a subset of the budget. NOTE: We can pass "names" directly to the `get_dataframes()` method to return a subset of records. By omitting the `"FROM_"` or `"TO_"` prefix we get both.
###Code
dateidx1 = 1095.
dateidx2 = 1097.
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Look at pumpage (`TO_WELLS`) as a percentage of recharge (`FROM_RECHARGE`)
###Code
dateidx1 = 1095.
dateidx2 = 1097.
zones = ['SURF']
# Pull out the individual records of interest
rech = df.loc[(slice(dateidx1, dateidx2), ['FROM_RECHARGE']), :][zones]
pump = df.loc[(slice(dateidx1, dateidx2), ['TO_WELLS']), :][zones]
# Remove the "record" field from the index so we can
# take the difference of the two DataFrames
rech = rech.reset_index()
rech = rech.set_index(['totim'])
rech = rech[zones]
pump = pump.reset_index()
pump = pump.set_index(['totim'])
pump = pump[zones] * -1
# Compute pumping as a percentage of recharge
(pump / rech) * 100.
###Output
_____no_output_____
###Markdown
Pass `start_datetime` and `timeunit` keyword arguments to return a dataframe with a datetime multi-index
###Code
dateidx1 = pd.Timestamp('1972-12-29')
dateidx2 = pd.Timestamp('1972-12-30')
names = ['FROM_RECHARGE', 'TO_WELLS', 'CONSTANT_HEAD']
zones = ['SURF', 'CONF']
df = zb.get_dataframes(start_datetime='1970-01-01', timeunit='D', names=names)
df.loc[(slice(dateidx1, dateidx2), slice(None)), :][zones]
###Output
_____no_output_____
###Markdown
Pass `index_key` to indicate which fields to use in the multi-index (default is "totim"; valid keys are "totim" and "kstpkper")
###Code
df = zb.get_dataframes(index_key='kstpkper')
df.head()
###Output
_____no_output_____
###Markdown
Write Budget Output to CSV
We can write the resulting recarray to a csv file with the `.to_csv()` method of the `ZoneBudget` object.
###Code
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=[(0, 0), (0, 1096)])
f_out = os.path.join('data', 'Example_output.csv')
zb.to_csv(f_out)
# Read the file in to see the contents
try:
import pandas as pd
print(pd.read_csv(f_out).to_string(index=False))
except:
with open(f_out, 'r') as f:
for line in f.readlines():
print('\t'.join(line.split(',')))
###Output
totim time_step stress_period name ZONE_0 ZONE_1 ZONE_2 ZONE_3
1.0 0 0 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_CONSTANT_HEAD 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_RECHARGE 0.0 6222.673300 18.062912 36.125824
1.0 0 0 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 FROM_ZONE_1 0.0 0.000000 4275.257300 491.945070
1.0 0 0 FROM_ZONE_2 0.0 2744.821800 0.000000 2115.654000
1.0 0 0 FROM_ZONE_3 0.0 451.545720 1215.952300 0.000000
1.0 0 0 TOTAL_IN 0.0 9419.041000 5509.272500 2643.725000
1.0 0 0 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_CONSTANT_HEAD 0.0 821.283200 648.806100 976.233600
1.0 0 0 TO_WELLS 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_DRAINS 0.0 3832.150000 0.000000 0.000000
1.0 0 0 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1.0 0 0 TO_ZONE_1 0.0 0.000000 2744.821800 451.545720
1.0 0 0 TO_ZONE_2 0.0 4275.257300 0.000000 1215.952300
1.0 0 0 TO_ZONE_3 0.0 491.945070 2115.654000 0.000000
1.0 0 0 TOTAL_OUT 0.0 9420.636000 5509.282000 2643.731400
1.0 0 0 IN-OUT 0.0 1.594727 0.009766 0.006348
1.0 0 0 PERCENT_DISCREPANCY NaN 0.016929 0.000177 0.000240
1097.0 0 1096 FROM_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_CONSTANT_HEAD 0.0 0.000000 231.566590 86.217200
1097.0 0 1096 FROM_WELLS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_RECHARGE 0.0 5145.581500 14.936376 29.872751
1097.0 0 1096 FROM_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 FROM_ZONE_1 0.0 0.000000 3475.123500 138.600450
1097.0 0 1096 FROM_ZONE_2 0.0 3269.318800 0.000000 1764.655300
1097.0 0 1096 FROM_ZONE_3 0.0 192.186040 1528.048200 0.000000
1097.0 0 1096 TOTAL_IN 0.0 8607.086000 5249.675000 2019.345700
1097.0 0 1096 TO_STORAGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_CONSTANT_HEAD 0.0 230.548360 215.701500 299.113800
1097.0 0 1096 TO_WELLS 0.0 4762.800000 0.000000 0.000000
1097.0 0 1096 TO_DRAINS 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_RECHARGE 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_0 0.0 0.000000 0.000000 0.000000
1097.0 0 1096 TO_ZONE_1 0.0 0.000000 3269.318800 192.186040
1097.0 0 1096 TO_ZONE_2 0.0 3475.123500 0.000000 1528.048200
1097.0 0 1096 TO_ZONE_3 0.0 138.600450 1764.655300 0.000000
1097.0 0 1096 TOTAL_OUT 0.0 8607.072000 5249.676000 2019.348000
1097.0 0 1096 IN-OUT 0.0 0.013672 0.000977 0.002319
1097.0 0 1096 PERCENT_DISCREPANCY NaN 0.000159 0.000019 0.000115
###Markdown
Net Budget
Using the "net" keyword argument, we can request a net budget for each zone/record name or for a subset of zones and record names. Note that we can identify the record names we want without the added `"_IN"` or `"_OUT"` string suffix.
###Code
zon = np.ones((nlay, nrow, ncol), int)
zon[1, :, :] = 2
zon[2, :, :] = 3
aliases = {1: 'SURF', 2:'CONF', 3: 'UFA'}
times = list(range(1092, 1097+1))
zb = flopy.utils.ZoneBudget(cbc_f, zon, totim=times, aliases=aliases)
zb.get_budget(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df = zb.get_dataframes(names=['STORAGE', 'WELLS'], zones=['SURF', 'UFA'], net=True)
df.head(6)
###Output
_____no_output_____
###Markdown
Plot Budget Components
The following is a function that can be used to better visualize the budget components using matplotlib.
###Code
def tick_label_formatter_comma_sep(x, pos):
return '{:,.0f}'.format(x)
def volumetric_budget_bar_plot(values_in, values_out, labels, **kwargs):
if 'ax' in kwargs:
ax = kwargs.pop('ax')
else:
ax = plt.gca()
x_pos = np.arange(len(values_in))
rects_in = ax.bar(x_pos, values_in, align='center', alpha=0.5)
x_pos = np.arange(len(values_out))
rects_out = ax.bar(x_pos, values_out, align='center', alpha=0.5)
plt.xticks(list(x_pos), labels)
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=90)
ax.get_yaxis().set_major_formatter(mpl.ticker.FuncFormatter(tick_label_formatter_comma_sep))
ymin, ymax = ax.get_ylim()
if ymax != 0:
if abs(ymin) / ymax < .33:
ymin = -(ymax * .5)
else:
ymin *= 1.35
else:
ymin *= 1.35
plt.ylim([ymin, ymax * 1.25])
for i, rect in enumerate(rects_in):
label = '{:,.0f}'.format(values_in[i])
height = values_in[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymax)
vertical_alignment = 'bottom'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
for i, rect in enumerate(rects_out):
label = '{:,.0f}'.format(values_out[i])
height = values_out[i]
x = rect.get_x() + rect.get_width() / 2
y = height + (.02 * ymin)
vertical_alignment = 'top'
horizontal_alignment = 'center'
ax.text(x, y, label, ha=horizontal_alignment, va=vertical_alignment, rotation=90)
# horizontal line indicating zero
ax.plot([rects_in[0].get_x() - rects_in[0].get_width() / 2,
rects_in[-1].get_x() + rects_in[-1].get_width()], [0, 0], "k")
return rects_in, rects_out
fig = plt.figure(figsize=(16, 5))
times = [2., 500., 1000., 1095.]
for idx, t in enumerate(times):
ax = fig.add_subplot(1, len(times), idx + 1)
zb = flopy.utils.ZoneBudget(cbc_f, zon, kstpkper=None, totim=t, aliases=aliases)
recname = 'STORAGE'
values_in = zb.get_dataframes(names='FROM_{}'.format(recname)).T.squeeze()
values_out = zb.get_dataframes(names='TO_{}'.format(recname)).T.squeeze() * -1
labels = values_in.index.tolist()
rects_in, rects_out = volumetric_budget_bar_plot(values_in, values_out, labels, ax=ax)
plt.ylabel('Volumetric rate, in Mgal/d')
plt.title('{} @ totim = {}'.format(recname, t))
plt.tight_layout()
plt.show()
###Output
_____no_output_____
books/eval_baseline.ipynb | ###Markdown
CONFIGURATION
###Code
!ls ../out/attnet
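# assumed imports for this notebook (the original cells never import them);
# project-specific helpers (AttentionNeuralNet, FactoryDataset, Dataset,
# SyntheticFaceDataset, get_transforms_det/get_transforms_aug, mtrans) are
# presumed to come from this repository's own modules
import os
import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision import transforms, utils
from tqdm import tqdm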
project = '../out/attnet'
name = 'att_attnet_ferattention_attloss_adam_ckdark_dim64_resnet18x32_fold0_000'
pathnamedataset = '~/.datasets'
pathmodel = os.path.join( project, name, 'models/model_best.pth.tar' ) #model_best, chk000120
pathproject = os.path.join( project, name )
batch_size = 2
workers = 1
cuda = False
parallel = False
gpu = 2
seed = 1
imsize = 64
###Output
_____no_output_____
###Markdown
Load model
###Code
# load model
print('>> Load model ...')
net = AttentionNeuralNet(
patchproject=project,
nameproject=name,
no_cuda=cuda,
parallel=parallel,
seed=seed,
gpu=gpu
)
if net.load( pathmodel ) is not True:
assert(False)
###Output
>> Load model ...
=> loading checkpoint '../out/attnet/att_attnet_ferattention_attloss_adam_ckdark_dim64_resnet18x32_fold0_000/models/model_best.pth.tar'
=> loaded checkpoint for ferattention arch!
###Markdown
Load dataset
###Code
# Load dataset
print('>> Load dataset ...')
namedataset = FactoryDataset.ckdark
subset = FactoryDataset.validation
imagesize=64
idenselect=np.arange(10)
breal=False
if breal:
dataset = Dataset(
data=FactoryDataset.factory(
pathname=pathnamedataset,
name=namedataset,
subset=subset,
idenselect=idenselect,
#transform=train_transform,
download=True
),
num_channels=3,
transform=get_transforms_det( imsize ),
# transform=transforms.Compose([
# #mtrans.ToPad(h_pad=5,w_pad=5),
# mtrans.ToResize( (imagesize, imagesize), resize_mode='square' ),
# #mtrans.ToRandomTransform(mtrans.ToGaussianNoise( sigma=0.1 ), prob=1.0 ),
# #mtrans.RandomCrop( (255,255), limit=50, padding_mode=cv2.BORDER_CONSTANT ),
# #mtrans.ToResizeUNetFoV(imsize, cv2.BORDER_REFLECT_101),
# mtrans.ToGrayscale(),
# mtrans.ToTensor(),
# #mtrans.ToMeanNormalization( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
# mtrans.ToNormalization(),
# ])
)
else:
dataset = SyntheticFaceDataset(
data=FactoryDataset.factory(
pathname=pathnamedataset,
name=namedataset,
subset=subset,
idenselect=idenselect,
download=True
),
pathnameback='~/.datasets/coco',
ext='jpg',
count=2000,
num_channels=3,
iluminate=True, angle=45, translation=0.3, warp=0.2, factor=0.2,
#iluminate=True, angle=45, translation=0.1, warp=0.0, factor=0.0,
transform_data=get_transforms_aug( imsize ),
transform_image=get_transforms_det( imsize ),
)
print(len(dataset))
# print( dataset.classes )
# print( dataset.data.classes )
###Output
>> Load dataset ...
2000
###Markdown
Evaluate model
###Code
import torch.nn.functional as FF
def norm(x):
x = x-x.min()
x = x / x.max()
return x
idx=0
k=0
for i in range( len(dataset) ):
idx = i + 10
sample = dataset[ idx ]
if breal:
image = sample['image'].unsqueeze(0)
label = sample['label'].argmax()
#image = F.fliplr( image )
image_org = sample['image']
else:
image_org, image, mask, meta = sample
image = image.unsqueeze(0)
label = meta[0]
y_theta = meta[1:].view(-1, 2, 3)
image = image.cuda()
#y_theta = y_theta.cuda()
y_lab_hat, att, fmap, srf = net( image )
att = att.data.cpu().numpy().transpose(2,3,1,0)[...,0]
fmap = fmap.data.cpu().numpy().transpose(2,3,1,0)[:,:,0,0]
srf = srf.data.cpu().numpy().transpose(2,3,1,0)[...,0]
image = image.data.cpu().numpy().transpose(2,3,1,0)[...,0]
image_org = image_org.numpy().transpose(1,2,0)
y_lab_hat_max = y_lab_hat.argmax()
#if label != 6: continue
k+=1
if k > 5: break
plt.figure( figsize=(18,8))
plt.subplot(151)
plt.imshow( norm(image_org) )
plt.title('original image' )
plt.subplot(152)
plt.imshow( norm(image) )
plt.title('image | class: {} est: {}'.format( label, y_lab_hat_max ) )
plt.axis('off')
plt.subplot(153)
plt.imshow( (fmap))
plt.title('attention map')
plt.axis('off' )
plt.subplot(154)
plt.imshow( srf.sum(2) )
plt.title('feature map')
plt.axis('off' )
plt.subplot(155)
plt.imshow( norm(att) )
plt.title('attention feature')
plt.axis('off')
plt.show()
# classes = ['Neutral - NE', 'Happiness - HA', 'Surprise - SU', 'Sadness - SA', 'Anger - AN', 'Disgust - DI', 'Fear - FR', 'Contempt - CO']
# net.net
def vistensor(tensor, ch=0, allkernels=False, nrow=8, padding=1, brgb=True):
"""
vistensor: visualize a 4-D weight tensor as an image grid
@ch: first channel to visualize
@allkernels: visualize all kernels/channels
"""
n,c,w,h = tensor.shape
if allkernels: tensor = tensor.view(n*c,-1,w,h )
elif brgb:
tensor = tensor[:,ch:(ch+3),:,:]
else:
tensor = tensor[:,ch,:,:].unsqueeze(dim=1)
rows = np.min( (tensor.shape[0]//nrow + 1, 64 ) )
grid = utils.make_grid(tensor, nrow=nrow, normalize=True, padding=padding)
plt.figure( figsize=(nrow,rows) )
plt.imshow(grid.numpy().transpose((1, 2, 0)))
# `alexnet` is not defined earlier in this notebook; assuming a torchvision
# AlexNet is intended here (sketch):
from torchvision import models
alexnet = models.alexnet(pretrained=True)
kernel = alexnet.features[0].weight.data.clone()
print(kernel.shape)
vistensor(kernel, ch=0, allkernels=False, nrow=20, brgb=True )
#savetensor(kernel,'kernel.png', allkernels=False)
plt.axis('off')
plt.ioff()
plt.show()
model = net.net
kernel = model.netclass
kernel = kernel.conv1.weight.data.cpu().clone()
print(kernel.shape)
vistensor(kernel, ch=0, allkernels=False, nrow=20, brgb=True )
#savetensor(kernel,'kernel.png', allkernels=False)
plt.axis('off')
plt.ioff()
plt.show()
def norm(x):
x = x-x.min()
x = x / x.max()
return x
def sigmoid(x):
return 1. / (1 + np.exp(-x))
def save(image, srf, fmap, att, pathname='../netruns/', pos_name=''):
image_name = os.path.join(pathname, '{}_{}.png'.format('image', pos_name) )
srf_name = os.path.join(pathname, '{}_{}.png'.format('srf', pos_name) )
map_name = os.path.join(pathname, '{}_{}.png'.format('map', pos_name) )
smap_name = os.path.join(pathname, '{}_{}.png'.format('sigma_map', pos_name) )
att_name = os.path.join(pathname, '{}_{}.png'.format('att', pos_name) )
fig = plt.figure(figsize=(8,8)); plt.imshow( norm(image) ); plt.axis('off' ); fig.savefig(image_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( srf.sum(2) ); plt.axis('off' ); fig.savefig(srf_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( (fmap) ); plt.axis('off' ); fig.savefig(map_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( sigmoid(fmap) ); plt.axis('off' ); fig.savefig(smap_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( norm(att) ); plt.axis('off' ); fig.savefig(att_name, bbox_inches='tight',transparent=True, pad_inches=0)
idx=2
sample = dataset[ idx ]
if breal:
image = sample['image'].unsqueeze(0)
label = sample['label'].argmax()
else:
image_org, image, mask, meta = sample
label = meta[0]  # needed for the print below
image = image.unsqueeze(0)
y_lab_hat, att, fmap, srf = net( image )
att = att.data.cpu().numpy().transpose(2,3,1,0)[...,0]
fmap = fmap.data.cpu().numpy().transpose(2,3,1,0)[:,:,0,0]
srf = srf.data.cpu().numpy().transpose(2,3,1,0)[...,0]
image = image.data.cpu().numpy().transpose(2,3,1,0)[...,0]
y_lab_hat = y_lab_hat.argmax()
print(y_lab_hat,'|',label)
# save( norm(image), srf, fmap, att )
plt.figure( figsize=(16,8))
plt.subplot(141)
plt.imshow( norm(image) )
plt.title('image')
plt.axis('off')
plt.subplot(142)
plt.imshow( (fmap) ) #sigmoid
plt.title('attention map')
plt.axis('off' )
plt.subplot(143)
plt.imshow( srf.sum(2) )
plt.title('feature map')
plt.axis('off' )
plt.subplot(144)
plt.imshow( norm(att) )
# plt.title('class {}/{}'.format(y_lab_hat, label) )
plt.title('attention feature')
plt.axis('off')
plt.show()
import random
import scipy.misc
def save( pathname, image ):
cv2.imwrite(pathname, image )
def sigmoid(x):
return 1. / (1 + np.exp(-x))
def norm(x):
x = x-x.min()
x = x / x.max()
return x
def mean_normalization(image, mean, std):
tensor = image.float()/255.0
result_tensor = []
for t, m, s in zip(tensor, mean, std):
result_tensor.append(t.sub_(m).div_(s))
return torch.stack(result_tensor, 0)
def pad(image, xypad):
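# zero-pad a single-channel image by xypad pixels on every side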
h,w = image.shape
im_pad = np.zeros( (h+2*xypad,w+2*xypad) )
im_pad[xypad:xypad+h,xypad:xypad+w] = image
return im_pad
def crop(image, xycrop):
h,w = image.shape[:2]
image = image[ xycrop:h-xycrop,xycrop:w-xycrop]
return image
imagesize=64
image_org = cv2.imread('../rec/selfie_happy_dos.png')[:,:,(2,1,0)]
image = image_org.mean(axis=2)
# sigma=0.09
# image = image/255.0
# noise = np.array([random.gauss(0,sigma) for i in range( image.shape[0]*image.shape[1] )])
# noise = noise.reshape(image.shape[0],image.shape[1])
# image = (np.clip(image+noise,0,1)*255).astype(np.uint8)
# # image = pad(image,10)
# image = crop(image, 20)
image = np.stack( (image,image,image), axis=2 )
image = cv2.resize( image, (imagesize, imagesize) )
# image = norm((image/255)**1.5)*255
image = torch.from_numpy(image).permute( (2,0,1) ).unsqueeze(0).float()
# image = mean_normalization(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
image = image / 255
print(image.shape)
y_lab_hat, att, fmap, srf = net( image )
att = att.data.cpu().numpy().transpose(2,3,1,0)[...,0]
fmap = fmap.data.cpu().numpy().transpose(2,3,1,0)[:,:,0,0]
srf = srf.data.cpu().numpy().transpose(2,3,1,0)[...,0]
image = image.data.cpu().numpy().transpose(2,3,1,0)[...,0]
y_lab_hat = y_lab_hat.data.cpu().argmax().numpy()
fmap = cv2.applyColorMap( (norm(fmap)*255).astype(np.uint8) , cv2.COLORMAP_JET)[:,:,(2,1,0)]
srf = cv2.applyColorMap( (norm(srf.sum(2))*255).astype(np.uint8), cv2.COLORMAP_JET)[:,:,(2,1,0)]
# att = att.data.cpu().numpy().transpose(2,3,1,0)[...,0]
# fmap = fmap.data.cpu().numpy().transpose(2,3,1,0)[:,:,0,0]
# srf = srf.data.cpu().numpy().transpose(2,3,1,0)[...,0]
# image = image.data.cpu().numpy().transpose(2,3,1,0)[...,0]
# y_lab_hat = y_lab_hat.argmax()
print(fmap.shape, fmap.min(), fmap.max() )
print(srf.shape, srf.min(), srf.max() )
print(y_lab_hat)
# print(dataset.data.classes[y_lab_hat])
att_mask = (np.abs( att ) > 0.2).astype(float)  # np.float is deprecated; use the builtin float
print(att_mask.shape, att_mask.min(), att_mask.max() )
image = norm(image)
#print(image.min(), image.max() )
bsave=False
if bsave:
save('../out/image.png', image_org[:,:,(2,1,0)] )
save('../out/attmap.png', fmap[:,:,(2,1,0)] )
save('../out/srf.png', srf[:,:,(2,1,0)] )
save('../out/att.png', norm(att)*255 )
plt.figure( figsize=(16,8))
plt.subplot(141)
plt.imshow( ((image_org) ).astype( np.uint8 ) )
plt.title('image')
plt.axis('off')
plt.subplot(142)
plt.imshow( (fmap))
plt.title('attention map')
plt.axis('off' )
plt.subplot(143)
plt.imshow( srf )
plt.title('feature map')
plt.axis('off' )
plt.subplot(144)
plt.imshow( norm(att) )
# plt.title('class {}/{}'.format(y_lab_hat, label) )
plt.title('attention feature')
plt.axis('off')
plt.show()
def save(image, srf, fmap, att, imemotion, pathname='../netruns/results/', pos_name=''):
image_name = os.path.join(pathname, '{}_{}.png'.format('image', pos_name) )
srf_name = os.path.join(pathname, '{}_{}.png'.format('srf', pos_name) )
map_name = os.path.join(pathname, '{}_{}.png'.format('map', pos_name) )
smap_name = os.path.join(pathname, '{}_{}.png'.format('sigma_map', pos_name) )
att_name = os.path.join(pathname, '{}_{}.png'.format('att', pos_name) )
emo_name = os.path.join(pathname, '{}_{}.png'.format('emotion', pos_name) )
fig = plt.figure(figsize=(8,8)); plt.imshow( norm(image) ); plt.axis('off' ); fig.savefig(image_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( srf.sum(2) ); plt.axis('off' ); fig.savefig(srf_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( (fmap) ); plt.axis('off' ); fig.savefig(map_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( sigmoid(fmap) ); plt.axis('off' ); fig.savefig(smap_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( norm(att) ); plt.axis('off' ); fig.savefig(att_name, bbox_inches='tight',transparent=True, pad_inches=0)
fig = plt.figure(figsize=(8,8)); plt.imshow( imemotion ); plt.axis('off' ); fig.savefig(emo_name, bbox_inches='tight',transparent=True, pad_inches=0)
def drawlabel(y, emotions, imsize=(500,500,3)):
colors = ([255,255,255],[255,255,255],[0,0,0],[0,0,0])
hbox=32; wbox=135 + 210
imemotions = np.ones( imsize, dtype=np.uint8 )*255
ymax = y.argmax()
for i, yi in enumerate(y):
k = 1 if y[i]>0.5 else 0
kh = 1 if ymax==i else 0
bbox = np.array([[0,0],[wbox,0],[wbox,hbox],[0,hbox]]);
bbox[:,0] += 19
bbox[:,1] += 50-28 + (i)*40
imemotions = cv2.fillConvexPoly(imemotions, bbox, color=colors[kh] )
bbox = np.array([[0,0],[int(wbox*y[i]),0],[int(y[i]*wbox),hbox],[0,hbox]]);
bbox[:,0] += 19
bbox[:,1] += 50-28 + (i)*40
imemotions = cv2.fillConvexPoly(imemotions, bbox, color=[255,160,122] )
cv2.putText(
imemotions,
'{}: {:.3f}'.format(emotions[i][:-5],y[i]),
(20, 50 + (i)*40),
color=colors[2+kh],
fontFace=cv2.FONT_HERSHEY_TRIPLEX,
fontScale=1,
thickness=2
)
imemotions = imemotions[20:-20,20:-20,:]
#imemotions = cv2.resize( imemotions, (128,128) )
return imemotions
imagesize=128
# sigmas = [ 0.01, 0.05, 0.07, 0.09, 0.1, 0.2, 0.3, 0.4 ]
sigmas = [ 0.001 ]
for i,sigma in enumerate(sigmas):
# gammas = [0.1, 0.2, 0.5, 1.0, 1.2, 1.5, 1.8, 2.0]
# gammas = [1.0]
# for i,gamma in enumerate(gammas):
image = cv2.imread('../rec/selfie_happy_dos.png')[:,:,(2,1,0)].mean(axis=2)
image = crop(image, 10)
# image = image/255.0
# noise = np.array([random.gauss(0,sigma) for i in range( image.shape[0]*image.shape[1] )])
# noise = noise.reshape(image.shape[0],image.shape[1])
# image = (np.clip(image+noise,0,1)*255).astype(np.uint8)
# image = pad(image,10)
image = np.stack( (image,image,image), axis=2 )
image = cv2.resize( image, (imagesize,imagesize) )
# image = norm((image/255)**gamma)*255
image = torch.from_numpy(image).permute( (2,0,1) ).unsqueeze(0).float()
image = mean_normalization(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#print(image.shape)
y_lab_hat, att, fmap, srf = net( image )
att = att.data.cpu().numpy().transpose(2,3,1,0)[...,0]
fmap = fmap.data.cpu().numpy().transpose(2,3,1,0)[:,:,0,0]
srf = srf.data.cpu().numpy().transpose(2,3,1,0)[...,0]
image = image.data.cpu().numpy().transpose(2,3,1,0)[...,0]
y_lab_hat_max = y_lab_hat.argmax()
#y_lab_hat = TF.softmax( y_lab_hat, dim=1 )
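# note: drawlabel below treats the scores as probabilities in [0, 1]; applying
# the softmax above would guarantee that range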
print(y_lab_hat)
print(y_lab_hat.shape)
print(y_lab_hat_max)
#print(fmap.shape, fmap.min(), fmap.max() )
#print(srf.shape, srf.min(), srf.max() )
#print(y_lab_hat_max, dataset.data.classes[y_lab_hat_max])
emotions = dataset.data.classes
imemotion = drawlabel(y_lab_hat[0,:], emotions, imsize=(360,385,3) )
# save( image, srf, fmap, att, imemotion, pathname='../out/result/', pos_name=i )
print('save ...')
dataloader = DataLoader(dataset, batch_size=100, shuffle=False, num_workers=0 )
print( len(dataset) )
Y_labs, Y_lab_hats = net.representation( dataloader, breal )
print(Y_lab_hats.shape, Y_labs.shape)
reppathname = os.path.join( pathproject, 'rep_{}_{}_{}_{}.pth'.format(name, namedataset, subset, 'real' if breal else 'no_real' ) )
torch.save( { 'Yh':Y_lab_hats, 'Y':Y_labs }, reppathname )
print('>>> save')
import sklearn.metrics as metrics
str_real = 'real' if breal else 'no_real'
# rep_val_pathname = os.path.join( pathproject, 'rep_{}_{}_{}_{}.pth'.format(name, namedataset, 'train', str_real) )
rep_val_pathname = os.path.join( pathproject, 'rep_{}_{}_{}_{}.pth'.format(name, namedataset, 'val', str_real) )
# rep_val_pathname = os.path.join( pathproject, 'rep_{}_{}_{}_{}.pth'.format(name, namedataset, 'test', str_real) )
data_emb_val = torch.load(rep_val_pathname)
Yto = data_emb_val['Y']
Yho = data_emb_val['Yh']
yhat = np.argmax( Yho, axis=1 )
y = Yto
acc = metrics.accuracy_score(y, yhat)
precision = metrics.precision_score(y, yhat, average='weighted')
recall = metrics.recall_score(y, yhat, average='weighted')
f1_score = 2*precision*recall/(precision+recall)
# print('Accuracy : %f' % acc)
# print('Precision : %f' % precision)
# print('Recall : %f' % recall)
# print('F1 score : %f' % f1_score)
# print("")
print('|Acc\t|Prec\t|Rec\t|F1\t|')
print( '|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|{:0.3f}\t|'.format(acc,precision,recall,f1_score).replace('.',',') )
print()
print( '{:0.3f}\n{:0.3f}\n{:0.3f}\n{:0.3f}'.format(acc,precision,recall,f1_score).replace('.',',') )
import sklearn.metrics as metrics
namedataset = FactoryDataset.ferblack
subset = FactoryDataset.validation
imagesize=128
def get_transforms_noise(size_input, sigma):
return transforms.Compose([
mtrans.ToResize( (size_input, size_input), resize_mode='squash' ) ,
mtrans.ToGaussianNoise( sigma=sigma ),
mtrans.ToTensor(),
mtrans.ToMeanNormalization( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ),
])
accs = []
sigmas = [ 0.01, 0.05, 0.07, 0.09, 0.1, 0.2, 0.3, 0.4 ]
# sigmas = [ 0.09 ]
for sigma in sigmas:
# dataset = Dataset(
# data=FactoryDataset.factory(
# pathname=pathnamedataset,
# name=namedataset,
# subset=subset,
# #transform=train_transform,
# download=True
# ),
# num_channels=3,
# transform=transforms.Compose([
# mtrans.ToResize( (imagesize, imagesize), resize_mode='square' ),
# mtrans.ToGaussianNoise( sigma=sigma ),
# mtrans.ToTensor(),
# mtrans.ToMeanNormalization( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
# ])
# )
dataset = SyntheticFaceDataset(
data=FactoryDataset.factory(
pathname='~/.datasets/',
name=namedataset,
subset=subset,
download=True
),
pathnameback='~/.datasets/coco',
ext='jpg',
count=2000,
num_channels=3,
iluminate=True, angle=45, translation=0.3, warp=0.2, factor=0.2,
transform_data=get_transforms_noise( imsize, sigma ),
transform_image=get_transforms_det( imsize ),
)
dataloader = DataLoader(dataset, batch_size=40, shuffle=False, num_workers=10 )
Y_labs = []
Y_lab_hats = []
Zs = []
for i_batch, sample in enumerate( tqdm(dataloader) ):
#x_img, y_lab = sample['image'], sample['label']
x_org, x_img, y_mask, y_lab = sample
x_img = x_img.cuda()
#y_lab = y_lab.argmax(dim=1)
z, y_lab_hat, att, _,_ = net( x_img )
Y_labs.append(y_lab)
Y_lab_hats.append(y_lab_hat.data.cpu())
Zs.append(z.data.cpu())
Y_labs = np.concatenate( Y_labs, axis=0 )
Y_lab_hats = np.concatenate( Y_lab_hats, axis=0 )
Zs = np.concatenate( Zs, axis=0 )
Xto = Zs
Yto = Y_labs
Yho = Y_lab_hats
yhat = np.argmax( Yho, axis=1 )
y = Yto
acc = metrics.accuracy_score(y, yhat)
precision = metrics.precision_score(y, yhat, average='macro')
recall = metrics.recall_score(y, yhat, average='macro')
f1_score = 2*precision*recall/(precision+recall)
accs.append(acc)
#print('Accuracy : %f' % acc)
#print('Precision : %f' % precision)
#print('Recall : %f' % recall)
#print('F1 score : %f' % f1_score)
#print("")
accs = np.stack( accs, axis=0 )
print(accs)
plt.figure()
plt.plot(sigmas, accs, '-k^', label='Att' )
#plt.plot(ix, accK*100, '-b^', label='Acc@K')
plt.xlabel('Noise')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.show()
#Real
accs_base = [0.8625, 0.825, 0.8125, 0.7625, 0.7875, 0.5375, 0.325, 0.2375]
accs_att_cls = [0.875, 0.875, 0.875, 0.8625, 0.85, 0.5625, 0.1625, 0.1625]
accs_att_rep = [0.8625, 0.8625, 0.8625, 0.85, 0.85, 0.7, 0.3875, 0.1375]
plt.figure()
plt.plot(sigmas[:-1], accs_att_rep[:-1], '-k^', label='Att+Rep+Cls' )
plt.plot(sigmas[:-1], accs_att_cls[:-1], '--ks', label='Att+Cls' )
plt.plot(sigmas[:-1], accs_base[:-1], ':k<', label='Baseline' )
plt.xlabel('Noise')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.savefig('../netruns/noise_realdataset.png')
plt.show()
#Synthetic
accs_base = [0.775, 0.7355, 0.683, 0.6355, 0.6275, 0.3745, 0.267, 0.221 ]
accs_att_cls = [0.799, 0.812, 0.797, 0.7975, 0.785, 0.432, 0.1675, 0.1625]
accs_att_rep = [0.8185, 0.824, 0.801, 0.786, 0.7785, 0.574, 0.262, 0.1355]
plt.figure()
plt.plot(sigmas[:-1], accs_att_rep[:-1], '-k^', label='Att+Rep+Cls' )
plt.plot(sigmas[:-1], accs_att_cls[:-1], '--ks', label='Att+Cls' )
plt.plot(sigmas[:-1], accs_base[:-1], ':k<', label='Baseline' )
plt.xlabel('Noise')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.savefig('../out/noise_syntheticdataset.png')
plt.show()
###Output
_____no_output_____
notebooks/Drosdowsky classification.ipynb | ###Markdown
Use Drosdowsky (1996) classification on sounding data
###Code
import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
from datetime import datetime, timedelta
import glob
from copy import deepcopy
import math
from matplotlib import pyplot as plt
from matplotlib import dates
%matplotlib inline
import metpy.calc as mpcalc
from metpy.units import units
data_path_sounding = '/home/rjackson/data/DARWIN_radiosonde/'
# get_sounding_times
# start_year = Start year of animation
# start_month = Start month of animation
# start_day = Start day of animation
# start_hour = Start hour of animation
# end_year = End year of animation
# end_month = End month of animation
# end_day = End day of animation
# end_minute = End minute of animation
# minute_interval = Interval in minutes between scans (default is 5)
# This procedure acquires an array of sounding times between start_time and end_time.
# Only the 00 UTC sounding is loaded following Pope et al. (2008)
def get_sounding_times(start_year, start_month, start_day,
start_hour, end_year,
end_month, end_day, end_hour,
minute_interval=5):
start_time = datetime(start_year,
start_month,
start_day,
start_hour,
)
end_time = datetime(end_year,
end_month,
end_day,
end_hour,
)
deltatime = end_time - start_time
if(deltatime.seconds > 0):  # timedelta has no .minutes attribute; .seconds covers the sub-day remainder
no_days = deltatime.days + 1
else:
no_days = deltatime.days
if(start_day != end_day):
no_days = no_days + 1
days = np.arange(0, no_days, 1)
print('We are about to load sounding files for ' + str(no_days) + ' days')
# Find the list of files for each day
cur_time = start_time
file_list = []
time_list = []
for i in days:
year_str = "%04d" % cur_time.year
day_str = "%02d" % cur_time.day
month_str = "%02d" % cur_time.month
format_str = (data_path_sounding +
'YPDN_' +
year_str +
month_str +
day_str +
'_00.nc')
data_list = glob.glob(format_str)
if(i % 100 == 0):
print(i)
for j in range(0, len(data_list)):
file_list.append(data_list[j])
cur_time = cur_time + timedelta(days=1)
# Parse all of the dates and time in the interval and add them to the time list
past_time = []
for file_name in file_list:
date_str = file_name[-14:-3]
year_str = date_str[0:4]
month_str = date_str[4:6]
day_str = date_str[6:8]
hour_str = date_str[9:11]
cur_time = datetime(int(year_str),
int(month_str),
int(day_str),
int(hour_str),
)
time_list.append(cur_time)
# Sort time list and make sure time are at least xx min apart
time_list.sort()
time_list_sorted = deepcopy(time_list)
time_list_final = []
past_time = []
for times in time_list_sorted:
cur_time = times
if(past_time == []):
past_time = cur_time
if(cur_time - past_time >= timedelta(minutes=minute_interval)
and cur_time >= start_time and cur_time <= end_time):
time_list_final.append(cur_time)
past_time = cur_time
return time_list_final
# Get a Radar object given a time period in the CPOL dataset
def get_sounding(time):
year_str = "%04d" % time.year
month_str = "%02d" % time.month
day_str = "%02d" % time.day
hour_str = "%02d" % time.hour
file_name_str = (data_path_sounding +
'YPDN_' +
year_str +
month_str +
day_str +
'_' +
hour_str +
'.nc')
sounding = Dataset(file_name_str, mode='r')
return sounding
###Output
_____no_output_____
###Markdown
Load netCDF sounding data
The 16 pressure levels used are: surface (assumed here to be 1013 hPa), 950, 925, 900, 850, 800, 750, 700, 650, 600, 550, 500, 400, 300, 200, and 100 hPa.
###Code
def find_nearest(array,value):
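# return the index of the element of `array` closest to `value`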
idx = (np.abs(array-value)).argmin()
return idx
###Output
_____no_output_____
###Markdown
Here is where we input the times and pressure levels to get sounding from.
###Code
start_year = 1998
end_year = 2015
sounding_times = get_sounding_times(start_year,1,1,0,
end_year,1,1,23)
pres_levels = [1013, 950, 925, 900, 850, 800, 750, 700,
650, 600, 550, 500, 400, 300, 200, 100]
print(len(sounding_times))
###Output
We are about to load sounding files for 6210 days
0
100
200
300
400
500
600
700
800
900
1000
1100
1200
1300
1400
1500
1600
1700
1800
1900
2000
2100
2200
2300
2400
2500
2600
2700
2800
2900
3000
3100
3200
3300
3400
3500
3600
3700
3800
3900
4000
4100
4200
4300
4400
4500
4600
4700
4800
4900
5000
5100
5200
5300
5400
5500
5600
5700
5800
5900
6000
6100
6200
1951
###Markdown
Loop over all of the soundings.
###Code
## Save soundings at 16 levels for later
u_soundings = np.zeros((len(sounding_times),16))
v_soundings = np.zeros((len(sounding_times),16))
t_soundings = np.zeros((len(sounding_times),16))
z_soundings = np.zeros((len(sounding_times),16))
dp_soundings = np.zeros((len(sounding_times),16))
time_soundings = []
pres_soundings = np.zeros((len(sounding_times),16))
rh_soundings = np.zeros((len(sounding_times),16))
count = 0
no_masked = 0
for time in sounding_times:
if(time.month <= 4 or time.month >= 9):
if(time.hour == 0):
pres_index = []
# Load sounding file
Sounding_netcdf = get_sounding(time)
# Read the relevant variables from the sounding file
p = Sounding_netcdf.variables['pres'][:]
wdir = Sounding_netcdf.variables['wdir'][:]
wspeed = Sounding_netcdf.variables['wspeed'][:]
u = -np.sin(np.deg2rad(wdir))*wspeed
v = -np.cos(np.deg2rad(wdir))*wspeed
#u = Sounding_netcdf.variables['u_wind'][:]
#v = Sounding_netcdf.variables['v_wind'][:]
t = Sounding_netcdf.variables['temp'][:]
z = Sounding_netcdf.variables['height'][:]
#dp = Sounding_netcdf.variables['dp'][:]
#rh = Sounding_netcdf.variables['rh'][:]
# Take levels from the sounding and place them into the array
for pres in pres_levels:
pres_index.append(find_nearest(p,pres))
# Check for availability of 16 pressure levels
good_sounding = 1
for i in range(0,len(pres_levels)-1):
if(p[pres_index[i]] < pres_levels[i]-50 or
p[pres_index[i]] > pres_levels[i]+50):
good_sounding = 0
if(abs(u[pres_index[i]]) > 75 or
abs(v[pres_index[i]]) > 75):
good_sounding = 0
u = u[pres_index]
v = v[pres_index]
t = t[pres_index]
z = z[pres_index]
for i in range(0,len(u)):
if(u[i] < -75 or v[i] < -75 or
u[i] is np.ma.masked or
v[i] is np.ma.masked or
t[i] is np.ma.masked):
good_sounding = 0
if(t[0] < 0):
t[:] = float('nan')
good_sounding = 0
# If pressure levels are not available, mask the entire sounding
if(good_sounding == 0):
no_masked = no_masked + 1
else:
u_soundings[count][:] = u
v_soundings[count][:] = v
t_soundings[count][:] = t
z_soundings[count][:] = z
time_soundings.append(time)
pres_soundings[count][:] = pres_levels
count = count + 1
if(count % 100 == 0):
print(time)
u_soundings = u_soundings[0:count][:]  # keep all `count` stored soundings so lengths match time_soundings
v_soundings = v_soundings[0:count][:]
t_soundings = t_soundings[0:count][:]
dp_soundings = dp_soundings[0:count][:]
z_soundings = z_soundings[0:count][:]
pres_soundings = pres_soundings[0:count][:]
print(str((no_masked/(count+no_masked)*100)) + '% of soundings masked')
###Output
4.960835509138381% of soundings masked
###Markdown
Time-height sections. Test the Drosdowsky (1997) classification over the two months of TWP-ICE.
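###Code
# A minimal sketch (hypothetical values, not from the data): the deep layer mean
# (DLM) computed below is the pressure-weighted mean zonal wind over the lowest
# 11 levels, DLM = sum(p_k * u_k) / sum(p_k).
p_demo = np.array([1013., 950., 925., 900., 850., 800., 750., 700., 650., 600., 550.])
u_demo = np.array([-2., -1., 0., 1., 2., 3., 4., 5., 6., 7., 8.])
print(np.sum(p_demo * u_demo) / np.sum(p_demo))  # a single DLM value in m/s
###Output
_____no_output_____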
###Code
# Input start and end times
start_year = 2006
start_month = 1
start_day = 1
start_time = datetime(year=start_year,
month=start_month,
day=start_day)
end_year = 2006
end_month = 3
end_day = 1
end_time = datetime(year=end_year,
month=end_month,
day=end_day)
indicies = []
datenums = []
i = 0
for times in time_soundings:
if(times >= start_time and times <= end_time):
indicies.append(i)
datenums.append(dates.date2num(times))
i = i + 1
datelocs = [datetime.strptime('2006-01-01', "%Y-%m-%d"),
datetime.strptime('2006-01-15', "%Y-%m-%d"),
datetime.strptime('2006-02-01', "%Y-%m-%d"),
datetime.strptime('2006-02-15', "%Y-%m-%d")]
x = dates.date2num(datelocs)
plt.figure(figsize=(10,10))
plt.subplot(311)
X,Y = np.meshgrid(pres_levels, datenums)
Z = u_soundings[indicies][:]
CS = plt.contour(Y, X, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('U (m/s)')
plt.gca().set_xticks(x)
# Set the xtick labels to correspond to just the dates you entered.
plt.gca().set_xticklabels([date.strftime("%Y-%m-%d") for date in datelocs])
# Calculate the pressure weighted DLM for each timestep
DLM = np.zeros(len(indicies))
U_300100 = np.zeros(len(indicies))
cape = np.zeros(len(indicies))
shear_u = np.zeros(len(indicies))
j = 0
g = 9.8 * units.meters / (units.seconds * units.seconds)  # gravitational acceleration, m s^-2
#x = np.multiply(dz[indicies1],
# np.divide((prof[indicies1] - t_mean[indicies1]),
# (t_mean[indicies1]).to('degK')))
for i in indicies:
print(sum(pres_levels[0:11]*u_soundings[i,0:11])/(np.sum(pres_levels[0:11])))
DLM[j] = np.sum(pres_levels[0:11]*u_soundings[i,0:11])/(np.sum(pres_levels[0:11]))
U_300100[j] = np.mean(u_soundings[i,13:15])
j = j + 1
classification_Drosdowsky = np.zeros(len(DLM))
j = 0
N = 1
break_spell = 0
U = 2.5
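# Plain-language sketch of the loop below: a monsoon spell persists while the
# running mean of the last N DLM values stays above U*(N+1)/N (westerly) with
# DLM > 0 and the 300-100 hPa mean zonal wind easterly (U_300100 < 0); when a
# spell of N > 1 days ends, those days are labeled monsoon (1), all others break (0).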
for i in range(0,len(DLM)):
print(DLM[i])
if(np.mean(DLM[i-N:i]) > U*(N+1)/N and DLM[i] > 0):
if(U_300100[i] < 0):
N = N + 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
j = j + 1
plt.subplot(312)
plt.plot_date(datenums,DLM)
plt.xlabel('Date')
plt.ylabel('DLM U Surface-500 hPa [m/s]')
plt.subplot(313)
plt.plot_date(datenums,classification_Drosdowsky)
plt.xlabel('Date')
plt.ylabel('Classification')
plt.gca().set_yticks([0,1])
plt.gca().set_yticklabels(['Break', 'Monsoon'])
plt.ylim((-1,2))
###Output
-0.49462780552751817
-1.1728035760508364
-2.9679781760558175
-3.1646596355211
-1.0678000742585059
-9.744153035718561
-3.5441056024616477
-1.8140225207219485
-2.8267710452274804
-0.5271991182675179
0.09042038980898988
1.6083878062190697
8.709639585012205
12.654354454797318
9.999263709211936
11.507626013473871
4.875856716990805
3.143023015077084
4.081150720768395
-6.814505542692455
15.857714829945174
16.020615384750243
16.43629877829843
17.2926171125479
17.422259259067097
16.587075898724915
16.99820076049136
13.868502572243239
8.988449404170067
5.915997629125662
3.3339861641013226
-2.2434052243565015
-2.4782841375293736
-5.391833869565991
-1.2751367708536714
-4.5370895132418445
-5.625813410438712
-3.345614446891761
-9.036868863717162
-6.39075523690188
-7.5271951962043255
-4.463969010601361
-6.856019371532365
-5.299808630263222
-6.282897975852891
-4.830915351737903
-2.9214362275597296
-2.4815999586929465
-4.487675816644978
-3.2729677971925306
-6.257875286097549
-6.4111843473312735
-8.075035214338842
-9.710003138633546
-0.49462780552751817
-1.1728035760508364
-2.9679781760558175
-3.1646596355211
-1.0678000742585059
-9.744153035718561
-3.5441056024616477
-1.8140225207219485
-2.8267710452274804
-0.5271991182675175
0.09042038980898988
1.6083878062190702
8.709639585012205
12.654354454797318
9.999263709211936
11.507626013473871
4.875856716990805
3.143023015077084
4.081150720768394
-6.814505542692455
15.857714829945174
16.020615384750243
16.436298778298426
17.292617112547898
17.422259259067097
16.587075898724915
16.99820076049136
13.868502572243235
8.988449404170067
5.9159976291256635
3.3339861641013218
-2.2434052243565015
-2.478284137529373
-5.391833869565991
-1.2751367708536716
-4.5370895132418445
-5.625813410438713
-3.3456144468917604
-9.036868863717162
-6.39075523690188
-7.527195196204326
-4.463969010601361
-6.856019371532365
-5.299808630263222
-6.282897975852891
-4.830915351737903
-2.9214362275597296
-2.4815999586929465
-4.487675816644979
-3.2729677971925306
-6.257875286097548
-6.411184347331274
-8.075035214338842
-9.710003138633546
###Markdown
Write classification from 2003 to 2015 to netCDF file
###Code
file_path = '/home/rjackson/data/Drosdowsky.cdf'
indicies = []
datenums = []
i = 0
for times in time_soundings:
indicies.append(i)
datenums.append(dates.date2num(times))
i = i + 1
# Calculate the pressure weighted DLM for each timestep
DLM = np.zeros(len(indicies)-1)
U_300100 = np.zeros(len(indicies)-1)
year = np.zeros(len(indicies)-1)
month = np.zeros(len(indicies)-1)
day = np.zeros(len(indicies)-1)
cape = np.zeros(len(indicies)-1)
shear = np.zeros(len(indicies)-1)
cin = np.zeros(len(indicies)-1)
rh500 = np.zeros(len(indicies)-1)
j = 0
g = 9.8 * units.meters / (units.seconds * units.seconds)  # gravitational acceleration, m s^-2
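# CAPE below is approximated as g * sum(dz * (T_parcel - T_env) / T_env) over the
# positively buoyant levels (LI > 0); CIN uses the same integrand over the
# negatively buoyant levels (LI < 0).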
for i in range(0,len(indicies)-1):
DLM[j] = sum(pres_levels[0:11]*u_soundings[i,0:11]/(sum(pres_levels[0:11])))
U_300100[j] = np.mean(u_soundings[i,13:15])
shear[j] = u_soundings[i,11]-u_soundings[i,0]
year[j] = time_soundings[i].year
month[j] = time_soundings[i].month
day[j] = time_soundings[i].day
pres_prof = pres_soundings[i,:] * units.millibar  # new name so the pres_levels list is not clobbered between iterations
t_mean = t_soundings[i,:] * units.celsius
dp_mean = dp_soundings[i,:] * units.celsius
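# NOTE: dp_soundings and rh_soundings are never filled in the loop above for
# this dataset (the 'dp'/'rh' reads are commented out), so dp_mean and rh500
# are zeros here.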
prof = mpcalc.parcel_profile(pres_prof, t_mean[0], dp_mean[0]).to('degK')
LI = prof-t_mean.to('degK')
dz = z_soundings[i][:]
dz[0] = z_soundings[i,0]
dz[1:] = z_soundings[i,1:]-z_soundings[i,0:-1]
dz = dz * units.meters
indicies1 = np.where(LI > 0 * units.kelvin)[0]
diff_indicies1 = deepcopy(indicies1)
diff_indicies1[1:] = indicies1[1:]-indicies1[:-1]
indicies1 = indicies1[np.where(diff_indicies1 < 3)]
x = np.multiply(dz[indicies1],
np.divide((prof[indicies1] - t_mean[indicies1].to('degK')),
(t_mean[indicies1]).to('degK')))
this = g*np.sum(x)
cape[j] = np.array(this)
indicies1 = np.where(LI < 0 * units.kelvin)[0]
diff_indicies1 = deepcopy(indicies1)
diff_indicies1[1:] = indicies1[1:]-indicies1[:-1]
indicies1 = indicies1[diff_indicies1 < 3]
x = np.multiply(dz[indicies1],
np.divide((prof[indicies1] - t_mean[indicies1].to('degK')),
(t_mean[indicies1]).to('degK')))
this = g*np.sum(x)
cin[j] = np.array(this)
rh500[j] = rh_soundings[i,11]
j = j + 1
classification_Drosdowsky = np.zeros(len(DLM))
j = 0
N = 1
break_spell = 0
U = 2.5
# Calculate Deep Layer Mean wind and classify time periods
for i in range(0,len(DLM)-1):
if(np.mean(DLM[i-N:i]) > U*(N+1)/N and DLM[i] > 0):
if(U_300100[i] < 0):
N = N + 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
j = j + 1
# Output to netCDF file
out_netcdf = Dataset(file_path, mode='w')
out_netcdf.createDimension('time', len(classification_Drosdowsky))
print(len(classification_Drosdowsky))
groups_file = out_netcdf.createVariable('groups', 'i4', ('time',))
groups_file.long_name = 'Drosdowsky classification regime'
groups_file.units = '0 = Break, 1 = Monsoon'
groups_file[:] = classification_Drosdowsky
years_file = out_netcdf.createVariable('year', year.dtype, ('time',))
years_file.long_name = 'Year'
years_file.units = 'YYYY'
years_file[:] = year
month_file = out_netcdf.createVariable('month', month.dtype, ('time',))
month_file.long_name = 'Month'
month_file.units = 'MM'
month_file[:] = month
day_file = out_netcdf.createVariable('day', day.dtype, ('time',))
day_file.long_name = 'Day'
day_file.units = 'DD'
day_file[:] = day
cape_file = out_netcdf.createVariable('cape', cape.dtype, ('time',))
cape_file.long_name = 'CAPE'
cape_file.units = 'J K-1 kg-1'
cape_file[:] = cape
shear_file = out_netcdf.createVariable('shear', shear.dtype, ('time',))
shear_file.long_name = 'Surface 500 hPa u shear'
shear_file.units = 'm s-1'
shear_file[:] = shear
rh500_file = out_netcdf.createVariable('rh500', shear.dtype, ('time',))
rh500_file.long_name = '500 RH'
rh500_file.units = 'percent'
rh500_file[:] = rh500
out_netcdf.close()
###Output
1819
###Markdown
Frequency of Monsoon/Break days in Darwin from Nov to May
###Code
file_path = '/home/rjackson/data/Drosdowsky.cdf'
in_netcdf = Dataset(file_path)
year = in_netcdf.variables['year'][:]
month = in_netcdf.variables['month'][:]
day = in_netcdf.variables['day'][:]
groups = in_netcdf.variables['groups'][:]
cape = in_netcdf.variables['cape'][:]
shear = in_netcdf.variables['shear'][:]
rh500 = in_netcdf.variables['rh500'][:]
times = []
for i in range(0, len(year)):
cur_time = datetime(year=int(year[i]),
month=int(month[i]),
day=int(day[i]))
times.append(cur_time)
Nov2005 = datetime(2005,11,1,0,0,1)
May2006 = datetime(2006,5,1,0,0,1)
Nov2006 = datetime(2006,11,1,0,0,1)
May2007 = datetime(2007,5,1,0,0,1)
Nov2009 = datetime(2009,11,1,0,0,1)
May2010 = datetime(2010,5,1,0,0,1)
Nov2010 = datetime(2010,11,1,0,0,1)
May2011 = datetime(2011,5,1,0,0,1)
times = np.array(times)
months = np.array([timer.month for timer in times])
time_indicies0506 = np.logical_and(times >= Nov2005,
times <= May2006)
time_indicies0607 = np.logical_and(times >= Nov2006,
times <= May2007)
time_indicies0910 = np.logical_and(times >= Nov2009,
times <= May2010)
time_indicies1011 = np.logical_and(times >= Nov2010,
times <= May2011)
times_summer = np.logical_or(months < 5, months > 10)
#include = np.logical_or(time_indicies0506, time_indicies0607)
#include = np.logical_or(include, time_indicies0910)
#include = np.logical_or(include, time_indicies1011)
include = np.where(times_summer)
year = year[include]
month = month[include]
day = day[include]
groups = groups[include]
cape = cape[include]
shear = shear[include]
rh500 = rh500[include]
month_bins = [11, 12, 1, 2, 3, 4]
num_monsoon = np.zeros(len(month_bins))
num_break = np.zeros(len(month_bins))
for i in range(0,len(month_bins)):
inds = np.where(np.logical_and(month == month_bins[i], groups == 1))
num_monsoon[i] = len(inds[0])
inds = np.where(np.logical_and(month == month_bins[i], groups == 0))
num_break[i] = len(inds[0])
mpl_fig = plt.figure()
ax = mpl_fig.add_subplot(111)
ax.bar(np.arange(0.6,6.6,1), num_break)
ax.bar(np.arange(0.6,6.6,1), num_monsoon, color='r')
ax.set_xticklabels(['','Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr'])
ax.set_xlabel('Month')
ax.set_ylabel('# of Days')
ax.legend(['Break', 'Monsoon'], loc='best')
print(num_monsoon[i])
print(num_break[i])
import pandas
mjo_index_file = '/home/rjackson/data/rmm.74toRealtime.txt'
data = pandas.read_csv(mjo_index_file,
header=2,
delim_whitespace=True)
data_matrix = np.ma.array(data.values)
year_m = data_matrix[:,0]
month_m = data_matrix[:,1]
day_m = data_matrix[:,2]
mjo = data_matrix[:,5]
amplitude = data_matrix[:,6]
mjo = np.ma.masked_greater(mjo, 99)  # mask missing values (indexed .mask assignment would only act on a copy)
amplitude = np.ma.masked_greater(amplitude, 99)
times = []
for i in range(0, len(year_m)):
cur_time = datetime(year=int(year_m[i]),
month=int(month_m[i]),
day=int(day_m[i]))
times.append(cur_time)
Nov2005 = datetime(2005,11,1,0,0,1)
May2006 = datetime(2006,5,1,0,0,1)
Nov2006 = datetime(2006,11,1,0,0,1)
May2007 = datetime(2007,5,1,0,0,1)
Nov2009 = datetime(2009,11,1,0,0,1)
May2010 = datetime(2010,5,1,0,0,1)
Nov2010 = datetime(2010,11,1,0,0,1)
May2011 = datetime(2011,5,1,0,0,1)
times = np.array(times)
months = np.array([x.month for x in times])
time_indicies0506 = np.logical_and(times >= Nov2005,
times <= May2006)
time_indicies0607 = np.logical_and(times >= Nov2006,
times <= May2007)
time_indicies0910 = np.logical_and(times >= Nov2009,
times <= May2010)
time_indicies1011 = np.logical_and(times >= Nov2010,
times <= May2011)
print(year_m.shape)
print(include[0].shape)
year_m = year_m[include]
month_m = month_m[include]
day_m = day_m[include]
mjo = mjo[include]
amplitude = amplitude[include]
bins = range(0,10)
mjo_hist_break, bins_break = np.histogram(mjo[groups == 0], bins=bins)
mjo_hist_monsoon, bins_monsoon = np.histogram(mjo[groups == 1], bins=bins)
plt.bar(bins_break[:-1]-0.4, mjo_hist_break, color='b')
plt.bar(bins_break[:-1]-0.4, mjo_hist_monsoon, color='r')
plt.gca().set_xticks([1,2,3,4,5,6,7,8])
plt.xlim([0, 8.7])
plt.xlabel('MJO index')
plt.ylabel('# of days')
plt.legend(['Break', 'Monsoon'], loc='best')
print(mjo_hist_break)
print(sum(mjo_hist_monsoon[[0,1,2,7]]))
print(sum(mjo_hist_monsoon[3:7]))
bins = range(0, 4500, 200)
cin_bins = range(-200, 0, 5)
cape[cape < 0] = 0
cape_hist_break, bins_break = np.histogram(cape[groups == 0],
bins=bins,
normed=True)
cape_hist_monsoon, bins_monsoon = np.histogram(cape[groups == 1],
bins=bins,
normed=True)
plt.figure(figsize=(5,5))
plt.step(bins_break[:-1], cape_hist_break*200, color='b', linewidth=2)
plt.step(bins_monsoon[:-1], cape_hist_monsoon*200, color='r', linewidth=2)
plt.ylabel('Normalized frequency')
plt.legend(['Break', 'Monsoon'])
plt.xlabel('CAPE [J $kg^{-1}$]')
bins = np.arange(0,5,0.5)
amp_b47 = amplitude[np.where(np.logical_and(groups == 0, np.logical_and(mjo >= 4, mjo<=7)))]
amp_m47 = amplitude[np.where(np.logical_and(groups == 1, np.logical_and(mjo >= 4, mjo<=7)))]
amp_b13 = amplitude[np.where(np.logical_and(groups == 0, np.logical_or(mjo == 8, mjo<=3)))]
amp_m13 = amplitude[np.where(np.logical_and(groups == 1, np.logical_or(mjo == 8, mjo<=3)))]
fig = plt.figure(figsize=(9,4,))
ax = fig.add_subplot(111)
plt.boxplot([list(amp_b47), list(amp_m47), list(amp_b13), list(amp_m13)])
plt.xlabel('Large scale forcing')
plt.ylabel('MJO amplitude')
ax.set_xticklabels(['Break MJO elsewhere', 'Monsoon MJO elsewhere', 'Break MJO Australia', 'Monsoon MJO Australia'])
bins = range(0, 4000, 250)
cin_bins = range(-200, 0, 5)
cape[cape < 0] = 0
break_14 = np.where(np.logical_and(groups == 0,
np.logical_or(mjo == 8,
mjo <= 3)))
break_58 = np.where(np.logical_and(groups == 0,
np.logical_and(mjo >= 4,
mjo <= 7)))
monsoon_14 = np.where(np.logical_and(groups == 1,
np.logical_or(mjo == 8,
mjo <= 3)))
monsoon_58 = np.where(np.logical_and(groups == 1,
np.logical_and(mjo >= 4,
mjo <= 7)))
cape_hist_break14, bins_break14 = np.histogram(cape[break_14],
bins=bins,
normed=True)
cape_hist_monsoon14, bins_monsoon14 = np.histogram(cape[monsoon_14],
bins=bins,
normed=True)
cape_hist_break58, bins_break58 = np.histogram(cape[break_58],
bins=bins,
normed=True)
cape_hist_monsoon58, bins_monsoon58 = np.histogram(cape[monsoon_58],
bins=bins,
normed=True)
plt.figure(figsize=(7,7))
plt.step(bins_break14[:-1],
cape_hist_break14,
color='b',
linewidth=2)
plt.step(bins_monsoon14[:-1],
cape_hist_monsoon14,
color='r',
linewidth=2)
plt.step(bins_break58[:-1],
cape_hist_break58,
color='b',
linewidth=3,
linestyle='--',
)
plt.step(bins_monsoon58[:-1],
cape_hist_monsoon58,
color='r',
linewidth=3,
linestyle='--'
)
plt.legend(['Break MJO active', 'Monsoon MJO active', 'Break MJO inactive', 'Monsoon MJO inactive'])
plt.xlabel('CAPE [J $kg^{-1}$]')
bins = range(-25, 25, 1)
cin_bins = range(-200, 0, 5)
cape[cape < 0] = 0
#cin[cin > 0] = 0
shear_hist_break14, bins_break14 = np.histogram(shear[break_14],
bins=bins,
normed=True)
shear_hist_monsoon14, bins_monsoon14 = np.histogram(shear[monsoon_14],
bins=bins,
normed=True)
shear_hist_break58, bins_break58 = np.histogram(shear[break_58],
bins=bins,
normed=True)
shear_hist_monsoon58, bins_monsoon58 = np.histogram(shear[monsoon_58],
bins=bins,
normed=True)
plt.figure(figsize=(5,5))
plt.step(bins_break14[:-1], shear_hist_break14, color='b', linewidth=2)
plt.step(bins_monsoon14[:-1], shear_hist_monsoon14, color='r', linewidth=2)
plt.step(bins_break58[:-1], shear_hist_break58, color='b', linestyle='--', linewidth=2)
plt.step(bins_monsoon58[:-1], shear_hist_monsoon58, color='r', linestyle='--', linewidth=2)
plt.legend(['Break MJO inactive', 'Monsoon MJO inactive', 'Break MJO active', 'Monsoon MJO active'])
plt.xlabel('Surface-500 hPa zonal shear [$m s^{-1}$] ')
plt.ylabel('Normalized frequency')
plt.xlim([-30,30])
print(np.sum(shear_hist_monsoon14[1:25]))  # assumed: the undefined shear_hist_monsoon refers to one of the histograms above
bins = range(0, 100, 5)
cin_bins = range(-200, 0, 5)
cape[cape < 0] = 0
cin[cin > 0] = 0
rh_hist_break, bins_break = np.histogram(rh500[groups == 0],
bins=bins,
normed=True)
rh_hist_monsoon, bins_monsoon = np.histogram(rh500[groups == 1],
bins=bins,
normed=True)
plt.figure(figsize=(5,5))
plt.step(bins_break[:-1], rh_hist_break*5, color='b', linewidth=2)
plt.step(bins_monsoon[:-1], rh_hist_monsoon*5, color='r', linewidth=2)
plt.legend(['Break', 'Monsoon'], loc='best')
plt.xlabel('500 hPa RH [%] ')
plt.ylabel('Normalized frequency')
plt.xlim([0,90])
print(np.sum(rh_hist_monsoon[:9]*5))
###Output
0.128571428571
###Markdown
Use Drosdowsky (1996) classification on sounding data
###Code
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
from datetime import datetime, timedelta
import glob
from copy import deepcopy
import math
from matplotlib import pyplot as plt
from matplotlib import dates
%matplotlib inline
import metpy.calc as mpcalc
data_path_sounding = '/home/rjackson/data/soundings/'
# get_sounding_times
# start_year = Start year of animation
# start_month = Start month of animation
# start_day = Start day of animation
# start_hour = Start hour of animation
# end_year = End year of animation
# end_month = End month of animation
# end_day = End day of animation
# end_minute = End minute of animation
# minute_interval = Interval in minutes between scans (default is 5)
# This procedure acquires an array of sounding times between start_time and end_time.
# Only 23 UTC is loaded following Pope et al. (2008)
def get_sounding_times(start_year, start_month, start_day,
start_hour, start_minute, end_year,
end_month, end_day, end_hour,
end_minute, minute_interval=5):
start_time = datetime(start_year,
start_month,
start_day,
start_hour,
start_minute,
)
end_time = datetime(end_year,
end_month,
end_day,
end_hour,
end_minute,
)
deltatime = end_time - start_time
if(deltatime.seconds > 0):  # timedelta only exposes days/seconds/microseconds; any sub-day remainder adds a day
no_days = deltatime.days + 1
else:
no_days = deltatime.days
if(start_day != end_day):
no_days = no_days + 1
days = np.arange(0, no_days, 1)
print('We are about to load sounding files for ' + str(no_days) + ' days')
# Find the list of files for each day
cur_time = start_time
file_list = []
time_list = []
for i in days:
year_str = "%04d" % cur_time.year
day_str = "%02d" % cur_time.day
month_str = "%02d" % cur_time.month
format_str = (data_path_sounding +
'twpsondewnpnC3.b1.' +
year_str +
month_str +
day_str +
'*.23*'
'*custom.cdf')
data_list = glob.glob(format_str)
if(i % 100 == 0):
print(i)
for j in range(0, len(data_list)):
file_list.append(data_list[j])
cur_time = cur_time + timedelta(days=1)
# Parse all of the dates and time in the interval and add them to the time list
past_time = []
for file_name in file_list:
date_str = file_name[-26:-11]
year_str = date_str[0:4]
month_str = date_str[4:6]
day_str = date_str[6:8]
hour_str = date_str[9:11]
minute_str = date_str[11:13]
second_str = date_str[13:15]
cur_time = datetime(int(year_str),
int(month_str),
int(day_str),
int(hour_str),
int(minute_str),
int(second_str))
time_list.append(cur_time)
# Sort the time list and make sure times are at least minute_interval minutes apart
time_list.sort()
time_list_sorted = deepcopy(time_list)
time_list_final = []
past_time = []
for times in time_list_sorted:
cur_time = times
if(past_time == []):
past_time = cur_time
if(cur_time - past_time >= timedelta(minutes=minute_interval)
and cur_time >= start_time and cur_time <= end_time):
time_list_final.append(cur_time)
past_time = cur_time
return time_list_final
# Get a netCDF Dataset object for the sounding nearest the given time
def get_sounding(time):
year_str = "%04d" % time.year
month_str = "%02d" % time.month
day_str = "%02d" % time.day
hour_str = "%02d" % time.hour
minute_str = "%02d" % time.minute
second_str = "%02d" % time.second
file_name_str = (data_path_sounding +
'twpsondewnpnC3.b1.' +
year_str +
month_str +
day_str +
'.' +
hour_str +
minute_str +
second_str +
'.custom.cdf')
sounding = Dataset(file_name_str, mode='r')
return sounding
###Output
_____no_output_____
###Markdown
Load netCDF sounding data. Pressure levels are the surface (assumed here to be 1013 hPa), 950, 925, 900, 850, 800, 750, 700, 650, 600, 550, 500, 400, 300, 200, and 100 hPa (a total of 16 levels).
###Code
def find_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return idx
###Output
_____no_output_____
###Markdown
Here is where we input the times and pressure levels to get soundings from.
###Code
start_year = 2002
end_year = 2015
sounding_times = get_sounding_times(start_year,1,1,0,1,
end_year,1,1,23,1)
pres_levels = [1013, 950, 925, 900, 850, 800, 750, 700,
650, 600, 550, 500, 400, 300, 200, 100]
print(len(sounding_times))
###Output
We are about to load sounding files for 4749 days
0
100
200
300
400
500
600
700
800
900
1000
1100
1200
1300
1400
1500
1600
1700
1800
1900
2000
2100
2200
2300
2400
2500
2600
2700
2800
2900
3000
3100
3200
3300
3400
3500
3600
3700
3800
3900
4000
4100
4200
4300
4400
4500
4600
4700
4493
###Markdown
Loop over all of the soundings.
###Code
## Save soundings at 16 levels for later
u_soundings = np.zeros((len(sounding_times),16))
v_soundings = np.zeros((len(sounding_times),16))
t_soundings = np.zeros((len(sounding_times),16))
z_soundings = np.zeros((len(sounding_times),16))
dp_soundings = np.zeros((len(sounding_times),16))
time_soundings = []
pres_soundings = np.zeros((len(sounding_times),16))
count = 0
no_masked = 0
for time in sounding_times:
if(time.month <= 4 or time.month >= 9):
if(time.hour == 23):
pres_index = []
# Load sounding file
Sounding_netcdf = get_sounding(time)
# Read the relevant variables from the sounding file
p = Sounding_netcdf.variables['pres'][:]
u = Sounding_netcdf.variables['u_wind'][:]
v = Sounding_netcdf.variables['v_wind'][:]
t = Sounding_netcdf.variables['tdry'][:]
z = Sounding_netcdf.variables['alt'][:]
dp = Sounding_netcdf.variables['dp'][:]
# Take levels from the sounding and place them into the array
for pres in pres_levels:
pres_index.append(find_nearest(p,pres))
# Check for availability of 16 pressure levels
good_sounding = 1
for i in range(0,len(pres_levels)-1):
if(p[pres_index[i]] < pres_levels[i]-20 or
p[pres_index[i]] > pres_levels[i]+20):
good_sounding = 0
if(abs(u[pres_index[i]]) > 75 or
abs(v[pres_index[i]]) > 75):
good_sounding = 0
u = u[pres_index]
v = v[pres_index]
t = t[pres_index]
z = z[pres_index]
dp = dp[pres_index]
for i in range(0,len(u)):
if(u[i] < -75 or v[i] < -75 or
u[i] is np.ma.masked or
v[i] is np.ma.masked or
dp[i] is np.ma.masked or
t[i] is np.ma.masked or
dp[i] < -99):
good_sounding = 0
if(t[0] < 0):
t[:] = float('nan')
good_sounding = 0
# If pressure levels are not available, mask the entire sounding
if(good_sounding == 0):
no_masked = no_masked + 1
else:
u_soundings[count][:] = u
v_soundings[count][:] = v
t_soundings[count][:] = t
dp_soundings[count][:] = dp
z_soundings[count][:] = z
time_soundings.append(time)
pres_soundings[count][:] = pres_levels
count = count + 1
if(count % 100 == 0):
print(time)
u_soundings = u_soundings[0:count][:]  # keep all `count` stored soundings so lengths match time_soundings
v_soundings = v_soundings[0:count][:]
t_soundings = t_soundings[0:count][:]
dp_soundings = dp_soundings[0:count][:]
z_soundings = z_soundings[0:count][:]
pres_soundings = pres_soundings[0:count][:]
print(str((no_masked/(count+no_masked)*100)) + '% of soundings masked')
###Output
12.589316093909492% of soundings masked
###Markdown
Time-height sections. Test the Drosdowsky (1997) classification over the two months of TWP-ICE.
###Code
start_year = 2006
start_month = 1
start_day = 1
start_time = datetime(year=start_year,
month=start_month,
day=start_day)
end_year = 2006
end_month = 3
end_day = 1
end_time = datetime(year=end_year,
month=end_month,
day=end_day)
indicies = []
datenums = []
i = 0
for times in time_soundings:
if(times >= start_time and times <= end_time):
indicies.append(i)
datenums.append(dates.date2num(times))
i = i + 1
datelocs = [datetime.strptime('2006-01-01', "%Y-%m-%d"),
datetime.strptime('2006-01-15', "%Y-%m-%d"),
datetime.strptime('2006-02-01', "%Y-%m-%d"),
datetime.strptime('2006-02-15', "%Y-%m-%d")]
x = dates.date2num(datelocs)
plt.figure(figsize=(8,20))
plt.subplot(311)
X,Y = np.meshgrid(pres_levels, datenums)
Z = u_soundings[indicies][:]
CS = plt.contour(Y, X, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('U (m/s)')
plt.gca().set_xticks(x)
# Set the xtick labels to correspond to just the dates you entered.
plt.gca().set_xticklabels([date.strftime("%Y-%m-%d") for date in datelocs])
# Calculate the pressure weighted DLM for each timestep
DLM = np.zeros(len(indicies))
U_300100 = np.zeros(len(indicies))
j = 0
for i in indicies:
DLM[j] = sum(pres_levels[0:11]*u_soundings[i,0:11]/(sum(pres_levels[0:11])))
U_300100[j] = np.mean(u_soundings[i,13:15])
j = j + 1
classification_Drosdowsky = np.zeros(len(DLM))
j = 0
N = 1
break_spell = 0
U = 2.5
for i in range(0,len(DLM)):
if(np.mean(DLM[i-N:i]) > U*(N+1)/N and DLM[i] > 0):
if(U_300100[i] < 0):
N = N + 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
j = j + 1
plt.subplot(312)
plt.plot_date(datenums,DLM)
plt.xlabel('Date')
plt.ylabel('DLM U Surface-500 hPa [m/s]')
plt.subplot(313)
plt.plot_date(datenums,classification_Drosdowsky)
plt.xlabel('Date')
plt.ylabel('Classification')
plt.gca().set_yticks([0,1])
plt.gca().set_yticklabels(['Break', 'Monsoon'])
plt.ylim((-1,2))
###Output
/home/rjackson/anaconda3/lib/python3.5/site-packages/matplotlib/contour.py:370: RuntimeWarning: invalid value encountered in true_divide
dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
/home/rjackson/anaconda3/lib/python3.5/site-packages/numpy/core/_methods.py:59: RuntimeWarning: Mean of empty slice.
warnings.warn("Mean of empty slice.", RuntimeWarning)
/home/rjackson/anaconda3/lib/python3.5/site-packages/numpy/core/_methods.py:70: RuntimeWarning: invalid value encountered in double_scalars
ret = ret.dtype.type(ret / rcount)
###Markdown
Write classification from 2003 to 2015 to netCDF file
###Code
file_path = '/home/rjackson/data/Drosdowsky.cdf'
indicies = []
datenums = []
i = 0
for times in time_soundings:
indicies.append(i)
datenums.append(dates.date2num(times))
i = i + 1
# Calculate the pressure weighted DLM for each timestep
DLM = np.zeros(len(indicies)-1)
U_300100 = np.zeros(len(indicies)-1)
year = np.zeros(len(indicies)-1)
month = np.zeros(len(indicies)-1)
day = np.zeros(len(indicies)-1)
j = 0
for i in range(0,len(indicies)-1):
DLM[j] = sum(pres_levels[0:11]*u_soundings[i,0:11]/(sum(pres_levels[0:11])))
U_300100[j] = np.mean(u_soundings[i,13:15])
year[j] = time_soundings[i].year
month[j] = time_soundings[i].month
day[j] = time_soundings[i].day
j = j + 1
classification_Drosdowsky = np.zeros(len(DLM))
j = 0
N = 1
break_spell = 0
U = 2.5
# Calculate Deep Layer Mean wind and classify time periods
for i in range(0,len(DLM)-1):
if(np.mean(DLM[i-N:i]) > U*(N+1)/N and DLM[i] > 0):
if(U_300100[i] < 0):
N = N + 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
else:
if(N > 1):
classification_Drosdowsky[j-N+1:j-1] = 1
N = 1
j = j + 1
out_netcdf = Dataset(file_path, mode='w')
out_netcdf.createDimension('time', len(classification_Drosdowsky))
print(len(classification_Drosdowsky))
groups_file = out_netcdf.createVariable('groups', 'i4', ('time',))
groups_file.long_name = 'Pope classification regime'
groups_file.units = '0 = Break, 1 = Monsoon'
groups_file[:] = classification_Drosdowsky
years_file = out_netcdf.createVariable('year', year.dtype, ('time',))
years_file.long_name = 'Year'
years_file.units = 'YYYY'
years_file[:] = year
month_file = out_netcdf.createVariable('month', month.dtype, ('time',))
month_file.long_name = 'Month'
month_file.units = 'MM'
month_file[:] = month
day_file = out_netcdf.createVariable('day', day.dtype, ('time',))
day_file.long_name = 'Day'
day_file.units = 'DD'
day_file[:] = day
out_netcdf.close()
###Output
2568
trigger_word_detector/trigger_word_rnn_model.ipynb | ###Markdown
Trigger word RNN model. This notebook is dedicated to developing the RNN model to support this trigger word detection task. The RNN model being implemented consists of a Conv1D step (with dropout and batch normalization) that maps the input to size (1, 1375, filters), followed by a multilayered RNN of two GRU cells with dropout. All dropout uses keep_prob=0.8. In order to show an example of using this model, I will use the trained model to predict where a trigger word is said in an input and compare that vector to the actual vector of when the trigger word is said.
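###Code
# Illustrative shape check (derived from the description above): with a
# (1, 1402, 118) input, Conv1D with kernel_size=28, stride 1 and 'valid'
# padding yields 1402 - 28 + 1 = 1375 timesteps, which is why the label
# vectors below have length 1375.
print(1402 - 28 + 1)  # 1375
###Output
_____no_output_____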
###Code
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import latex
import spacy
import math
import IPython
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
np.random.seed(10)
import keras.backend as k
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# Loading data
X_data = np.load("../../data/trigger_word/data/x.npy")
y_data = np.load("../../data/trigger_word/data/y.npy")
X_data.shape = (1000,1402,118)
y_data.shape = (1000,1375)
# The first 10 recordings are held out as the test set; the rest are the (unshuffled) training examples
X_train = X_data[10:] # unshuffled
y_train = y_data[10:]
X_test = X_data[:10]
y_test = y_data[:10]
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# Placeholder functions for model input
def get_placeholders():
X = tf.placeholder(tf.float32, shape=[None,1402,118], name='X')
y = tf.placeholder(tf.float32, shape=[None,1375], name='Y')
return X,y
# Forward prop step consisting of conv 1d, multilayered rnn, and dropout usage
def rnn_cell(the_input):
# Conv 1D step:
Z = tf.layers.conv1d(the_input,filters=196,kernel_size=28,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
Bn = tf.layers.batch_normalization(Z)
A = tf.nn.relu(Bn)
D = tf.nn.dropout(A,keep_prob=0.8)
# Multilayered GRU units with dropout:
cell1 = tf.nn.rnn_cell.GRUCell(128,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
cell1 = tf.nn.rnn_cell.DropoutWrapper(cell1,output_keep_prob=0.8)
cell2 = tf.nn.rnn_cell.GRUCell(128,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
cell2 = tf.nn.rnn_cell.DropoutWrapper(cell2,output_keep_prob=0.8)
multi_cell = tf.nn.rnn_cell.MultiRNNCell([cell1,cell2]) # multilayered cell
outputs,curr_state = tf.nn.dynamic_rnn(multi_cell,inputs=D,dtype=tf.float32)
flats = tf.map_fn(lambda x: tf.layers.flatten(x),outputs)
out = tf.map_fn(lambda x: tf.layers.dense(x,1,activation=None,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0),reuse=tf.AUTO_REUSE),flats)
out = tf.reshape(out,[1,1375])
#flats = [tf.layers.flatten(out) for out in outputs] # - for eager_execution
#out = [tf.layers.dense(flat,1,activation=None,kernel_initializer=tf.contrib.layers.xavier_initializer(),reuse=tf.AUTO_REUSE) for flat in flats]
return out
# Cost function, uses sigmoid cross entropy with logits to compare the predictions at each 1375 step
def cost_function(logits,y):
return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=y,logits=logits))
# Sigmoid prediction for a given vector of logits, returns both a sigmoid activation output and discrete classes - 0,1
def prediction(logits):
sigmoid_out = tf.nn.sigmoid(logits)
desc_out = tf.to_int32(sigmoid_out > 0.5)
return sigmoid_out,desc_out
# Testing forward prop and cost function
# Example cost is large because sigmoid cross entropy is summed over all 1375 timesteps per training example
ax = X_train[0]
ax.shape = (1,1402, 118)
ay = y_train[0]
ay.shape = (1,1375)
tf.reset_default_graph()
x,y = get_placeholders()
out = rnn_cell(x)
cost = cost_function(out,y)
pred = prediction(out)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
aout = sess.run(out,feed_dict={x:ax,y:ay})
acost = sess.run(cost,feed_dict={x:ax,y:ay})
apred,adesc_pred = sess.run(pred,feed_dict={x:ax,y:ay})
print("output shape:",aout.shape)
print("example cost:",acost)
print("example predictions:",apred)
print("descrete predictions:",adesc_pred)
# Model for trigger word detection using stochastic gradient descent w/ gradient clipping
def model(X_train,y_train,lr=0.0001,num_epochs=25,retrain=True,print_each=True):
tf.reset_default_graph() # resetting graph
tf.set_random_seed(1)
seed=0
costs=[]
x,y = get_placeholders()
logits = rnn_cell(x)
cost = cost_function(logits,y)
pred = prediction(logits) # get binary predictions and sigmoid activation output
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
grads, variables = zip(*optimizer.compute_gradients(cost))
grads, _ = tf.clip_by_global_norm(grads, 5.0) # gradient clipping
optimizer = optimizer.apply_gradients(zip(grads, variables))
init = tf.global_variables_initializer()
saver = tf.train.Saver() # to save/load model
with tf.Session() as sess:
if retrain:
saver = tf.train.import_meta_graph("../../data/trigger_word/model/trigger_model.ckpt.meta")
saver.restore(sess, "../../data/trigger_word/model/trigger_model.ckpt")
else:
sess.run(init) # only run init when you are training a new model
for epoch in range(num_epochs):
running_cost = 0
seed += 1
X_train = shuffle(X_train,random_state=seed) # shuffle data for each epoch
y_train = shuffle(y_train,random_state=seed)
training_set = [(X_train[i],y_train[i]) for i in range(len(X_train))]
for aset in training_set:
(ax,ay) = aset
ax.shape = (1,1402, 118)
ay.shape = (1,1375)
_,temp_cost = sess.run([optimizer,cost], feed_dict={x:ax,y:ay})
if print_each:
print("cost increase:",temp_cost)
running_cost += temp_cost
costs.append(running_cost)
if (epoch+1) % 1 == 0: # printing costs
print("Cost at epoch {}: {}".format(epoch+1,running_cost))
# model saved on every iteration
loc = saver.save(sess, "../../data/trigger_word/model/trigger_model.ckpt")
return costs
###Output
_____no_output_____
###Markdown
Training model
###Code
costs1 = model(X_train,y_train,lr=0.0001,num_epochs=25,retrain=False,print_each=False)
costs2 = model(X_train,y_train,lr=0.0001,num_epochs=25,retrain=True,print_each=False)
costs3 = model(X_train,y_train,lr=0.0001,num_epochs=25,retrain=True,print_each=False)
costs4 = model(X_train,y_train,lr=0.00003,num_epochs=25,retrain=True,print_each=False)
costs5 = model(X_train,y_train,lr=0.00003,num_epochs=25,retrain=True,print_each=False)
costs6 = model(X_train,y_train,lr=0.00003,num_epochs=25,retrain=True,print_each=False)
# The cell that defined costs7 appears to be missing; it is re-created here with
# an assumed (hypothetical) learning rate so that the cells below can run.
costs7 = model(X_train,y_train,lr=0.00003,num_epochs=25,retrain=True,print_each=False)
print(costs7)
costs8 = model(X_train,y_train,lr=0.00001,num_epochs=25,retrain=True,print_each=False)
# Sample of the high level of variability in cost over time
tot_cost = costs1 + costs2 + costs3 + costs4 + costs5 + costs6 + costs7 + costs8
x_i = [i for i in range(len(tot_cost))]
plt.plot(x_i,tot_cost)
plt.xlabel("epoch")
plt.ylabel("cost")
plt.show()
costs9 = model(X_train,y_train,lr=0.000003,num_epochs=25,retrain=True,print_each=False)
costs10 = model(X_train,y_train,lr=0.000001,num_epochs=25,retrain=True,print_each=False)
costs11 = model(X_train,y_train,lr=0.0000003,num_epochs=25,retrain=True,print_each=False)
# Sample of the high level of variability in cost over time
tot_cost = costs1 + costs2 + costs3 + costs4 + costs5 + costs6 + costs7 + costs8 + costs9 + costs10 + costs11
x_i = [i for i in range(len(tot_cost))]
plt.plot(x_i,tot_cost)
plt.xlabel("epoch")
plt.ylabel("cost")
plt.show()
###Output
_____no_output_____
###Markdown
Predicting using the model
###Code
# Getting a prediction for a particular input audio clip
ax = X_train[50]
ax.shape = (1,1402, 118)
tf.reset_default_graph() # computation graph to get binary predictions
x,y = get_placeholders()
out = rnn_cell(x)
pred = prediction(out)
saver = tf.train.Saver()
with tf.Session() as sess:
saver = tf.train.import_meta_graph("../../data/trigger_word/model/trigger_model.ckpt.meta")
saver.restore(sess, "../../data/trigger_word/model/trigger_model.ckpt")
apred,adesc_pred = sess.run(pred,feed_dict={x:ax})
print("descrete predictions:",adesc_pred)
act_y = y_train[50]
act_y.shape = (1375)
pred_y = adesc_pred
pred_y.shape = (1375)
f_name = "../../data/trigger_word/training/train61.wav"
IPython.display.Audio(f_name)
plt.plot(act_y)
plt.title("actual")
plt.show()
plt.plot(pred_y)
plt.title("prediction")
plt.show()
###Output
_____no_output_____
###Markdown
Example of using this model to sample from a stream of audio. Given that this model takes in 10 second clips to make predictions, we will make this process semi-realtime by sampling audio every 0.5 seconds. This means we keep a running 10 second clip: the oldest 0.5 seconds is shaved off the beginning and 0.5 seconds of new audio is appended to the end (matching the slicing in the code below). We then save the last 1/20 of the output to represent the newest series of audio predictions. In this example I will be using two clips to represent the first 10 seconds and then a stream of 20 0.5-second audio clips.
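###Code
# Illustrative arithmetic for the sliding window described above (values taken
# from this notebook): each 0.5 s step replaces ~1/20 of the 1402 input
# timesteps and keeps the last 1375 - 1306 = 69 output timesteps, matching the
# slicing used in the loop below.
print(1402 // 20, 1375 - 1306)  # 70 69
###Output
_____no_output_____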
###Code
input1 = X_train[50]
input2 = X_train[50][0:1400,:] # removing 2 timesteps to make math easier
print(input1.shape)
print(input2.shape)
# Simulating a stream of audio data with predictions
tf.reset_default_graph() # computation graph to get binary predictions
x,y = get_placeholders()
out = rnn_cell(x)
pred = prediction(out)
no_iter = 0
saver = tf.train.Saver()
with tf.Session() as sess:
saver = tf.train.import_meta_graph("../../data/trigger_word/model/trigger_model.ckpt.meta")
saver.restore(sess, "../../data/trigger_word/model/trigger_model.ckpt")
while(no_iter<21):
if no_iter == 0: # predict the entire first 10 seconds
ax = input1
ax.shape = (1,1402, 118)
apred,adesc_pred = sess.run(pred,feed_dict={x:ax})
all_preds = adesc_pred[0].tolist() # used for saving the predicted labels
no_iter += 1
else: # simulating additional streaming data
ax.shape = (1402,118)
ax = np.concatenate((ax[70:,:],input2[70*(no_iter-1):70*no_iter,:])) # updating input to model
ax.shape = (1,1402, 118)
apred,adesc_pred = sess.run(pred,feed_dict={x:ax})
all_preds += adesc_pred[:,1306:][0].tolist()
no_iter += 1
# This shows the stream of predictions, which aligns with the correct output
plt.plot(all_preds)
plt.show()
###Output
_____no_output_____
notebooks/05 Unsupervised Learning - Clustering.ipynb | ###Markdown
Unsupervised Learning Part 2 -- Clustering. Clustering is the task of gathering samples into groups of similar samples according to some predefined similarity or distance (dissimilarity) measure, such as the Euclidean distance. In this section we will explore a basic clustering task on some synthetic and real-world datasets. Here are some common applications of clustering algorithms:
- Compression for data reduction
- Summarizing data as a preprocessing step for recommender systems
- Similarly:
  - grouping related web news (e.g. Google News) and web search results
  - grouping related stock quotes for investment portfolio management
  - building customer profiles for market analysis
- Building a code book of prototype samples for unsupervised feature extraction

Let's start by creating a simple, 2-dimensional, synthetic dataset:
###Code
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
from sklearn.datasets import make_blobs
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
X.shape
plt.scatter(X[:, 0], X[:, 1]);
###Output
_____no_output_____
###Markdown
In the scatter plot above, we can see three separate groups of data points and we would like to recover them using clustering -- think of "discovering" the class labels that we already take for granted in a classification task. Even if the groups are obvious in the data, it is hard to find them when the data lives in a high-dimensional space, which we can't visualize in a single histogram or scatterplot. This is the class separation we would like to recover, using only the feature data and not the labels of the dataset:
###Code
plt.scatter(X[:, 0], X[:, 1], c=y);
###Output
_____no_output_____
###Markdown
K means. The clustering algorithm we'll look at is called "K means." This is because the algorithm works by fitting *k* mean points (cluster centers) over the data. We'll choose `k=3` at first. To begin with, 3 random data points are chosen to be the first centers:
###Code
from sklearn.metrics import pairwise_distances_argmin
# 1. Randomly choose clusters
n_clusters = 3
rng = np.random.RandomState(random_state)
i = rng.permutation(X.shape[0])[:n_clusters]
centers = X[i]
print(i)
print(centers)
labels = pairwise_distances_argmin(X, centers)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.8);
###Output
_____no_output_____
###Markdown
The labels are decided based on which center (mean) is closest to the point. Note that this plot is in two dimensions, but distance can be calculated in any number of dimensions, over all features of the dataset.Once the current labels are calculated, re-calculate the center points as the mean position over all points with the associated label.
###Code
new_centers = np.array([X[labels == i].mean(0) for i in range(n_clusters)])
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.3);
plt.scatter(new_centers[:, 0], new_centers[:, 1], c='black', s=200, alpha=0.8);
###Output
_____no_output_____
###Markdown
Repeat this process of labeling the data and recalculating the centers until the centers no longer move:
###Code
while True:
# 2a. Assign labels based on closest center
labels = pairwise_distances_argmin(X, centers)
# 2b. Find new centers from means of points
new_centers = np.array([X[labels == i].mean(0)
for i in range(n_clusters)])
# 2c. Check for convergence
if np.all(centers == new_centers):
break
centers = new_centers
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.8);
###Output
_____no_output_____
###Markdown
Scikit learn makes this simple for us by providing a KMeans class.
###Code
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3, random_state=random_state)
###Output
_____no_output_____
###Markdown
We can get the cluster labels either by calling fit and then accessing the ``labels_`` attribute of the K means estimator, or by calling ``fit_predict``.Either way, the result contains the ID of the cluster that each point is assigned to.
###Code
labels = kmeans.fit_predict(X)
labels
###Output
_____no_output_____
###Markdown
Let's visualize the assignments that have been found
###Code
plt.scatter(X[:, 0], X[:, 1], c=labels);
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c='black', s=200, alpha=0.8);
###Output
_____no_output_____
###Markdown
Compared to the true labels:
###Code
plt.scatter(X[:, 0], X[:, 1], c=y);
###Output
_____no_output_____
###Markdown
Here, we are probably satisfied with the clustering results. But in general we might want to have a more quantitative evaluation. How about comparing our cluster labels with the ground truth we got when generating the blobs?
###Code
from sklearn.metrics import confusion_matrix, accuracy_score
print('Accuracy score:', accuracy_score(y, labels))
print(confusion_matrix(y, labels))
np.mean(y == labels)
###Output
_____no_output_____
###Markdown
Why is this 0.0 and not 1.0? Even though we recovered the partitioning of the data into clusters perfectly, the cluster IDs we assigned were arbitrary, and we cannot hope to recover them. Therefore, we must use a different scoring metric, such as ``adjusted_rand_score``, which is invariant to permutations of the labels:
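###Code
# Tiny illustration (hypothetical labels): accuracy is not invariant to
# permuting the cluster IDs, but the adjusted Rand index is.
from sklearn.metrics import accuracy_score, adjusted_rand_score
true_groups = [0, 0, 1, 1]
permuted = [1, 1, 0, 0]  # identical grouping, swapped IDs
print(accuracy_score(true_groups, permuted))       # 0.0
print(adjusted_rand_score(true_groups, permuted))  # 1.0
###Output
_____no_output_____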
###Code
from sklearn.metrics import adjusted_rand_score
adjusted_rand_score(y, labels)
###Output
_____no_output_____
###Markdown
One of the "short-comings" of K-means is that we have to specify the number of clusters, which we often don't know *apriori*. For example, let's have a look what happens if we set the number of clusters to 2 in our synthetic 3-blob dataset:
###Code
kmeans = KMeans(n_clusters=2, random_state=random_state)
labels = kmeans.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels);
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c='black', s=200, alpha=0.8)
plt.title("Incorrect Number of Blobs");
###Output
_____no_output_____
###Markdown
If our blobs are not distributed well, K means can also experience difficulties
###Code
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred);
plt.title("Anisotropicly Distributed Blobs");
###Output
_____no_output_____
###Markdown
Another problem arises when the variances of the different classes in our data are unequal
###Code
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred);
plt.title("Unequal Variance");
###Output
_____no_output_____
###Markdown
However, K means can cope with classes that are unevenly represented, i.e., with different classes contributing different numbers of samples
###Code
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
random_state=random_state).fit_predict(X_filtered)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred);
plt.title("Unevenly Sized Blobs");
###Output
_____no_output_____
###Markdown
Other clustering algorithms. As clustering is one of the main tasks in unsupervised learning, there are many algorithms for it beyond K means. Scikit-learn provides the following:
- `sklearn.cluster.KMeans`
- `sklearn.cluster.MeanShift`
- `sklearn.cluster.DBSCAN`
- `sklearn.cluster.AffinityPropagation`
- `sklearn.cluster.SpectralClustering`
- `sklearn.cluster.Ward`

Of these, Ward, SpectralClustering, DBSCAN and Affinity propagation can also work with precomputed similarity matrices.

EXERCISE: IRIS clustering: Perform K-means clustering on the iris data, searching for 3 clusters. How well does K-means match up with the actual clusters? Try clustering with other numbers.
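###Code
# A minimal sketch (not part of the original exercise) of one alternative from
# the list above: DBSCAN needs no cluster count, only a neighborhood radius
# (eps) and a minimum neighborhood size; the label -1 marks points it treats
# as noise. The eps/min_samples values here are illustrative defaults.
from sklearn.cluster import DBSCAN
db_labels = DBSCAN(eps=0.5, min_samples=5).fit_predict(X)  # X: the blob data from earlier
print(np.unique(db_labels))
###Output
_____no_output_____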
###Code
from sklearn.datasets import load_iris
iris = load_iris()
x_index = 0
y_index = 1
colors = ['blue', 'red', 'green']
for label, color in zip(range(len(iris.target_names)), colors):
plt.scatter(iris.data[iris.target==label, x_index],
iris.data[iris.target==label, y_index],
label=iris.target_names[label],
c=color)
plt.xlabel(iris.feature_names[x_index])
plt.ylabel(iris.feature_names[y_index])
plt.legend(loc='upper left')
plt.show()
# %load sols/02_iris_clustering.py
# adjusted_rand_score(iris.target, labels)
###Output
_____no_output_____
docs/torchvision/finetuning_instance_segmentation/torchvision_finetuning_instance_segmentation.ipynb | ###Markdown
TorchVision Instance Segmentation Finetuning Tutorial: https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
###Code
#conda activate pytorch
#!pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
###Output
_____no_output_____
###Markdown
Defining the Dataset. The [torchvision reference scripts for training object detection, instance segmentation and person keypoint detection](https://github.com/pytorch/vision/tree/v0.3.0/references/detection) make it easy to add support for new custom datasets. The dataset should inherit from the standard `torch.utils.data.Dataset` class, and implement `__len__` and `__getitem__`. The only specificity that we require is that the dataset `__getitem__` should return:
* image: a PIL Image of size (H, W)
* target: a dict containing the following fields
  * `boxes` (`FloatTensor[N, 4]`): the coordinates of the `N` bounding boxes in `[x0, y0, x1, y1]` format, ranging from `0` to `W` and `0` to `H`
  * `labels` (`Int64Tensor[N]`): the label for each bounding box
  * `image_id` (`Int64Tensor[1]`): an image identifier. It should be unique between all the images in the dataset, and is used during evaluation
  * `area` (`Tensor[N]`): The area of the bounding box. This is used during evaluation with the COCO metric, to separate the metric scores between small, medium and large boxes.
  * `iscrowd` (`UInt8Tensor[N]`): instances with `iscrowd=True` will be ignored during evaluation.
  * (optionally) `masks` (`UInt8Tensor[N, H, W]`): The segmentation masks for each one of the objects
  * (optionally) `keypoints` (`FloatTensor[N, K, 3]`): For each one of the `N` objects, it contains the `K` keypoints in `[x, y, visibility]` format, defining the object. `visibility=0` means that the keypoint is not visible. Note that for data augmentation, the notion of flipping a keypoint is dependent on the data representation, and you should probably adapt `references/detection/transforms.py` for your new keypoint representation

If your dataset returns data in the above format, it will work for both training and evaluation, and will use the evaluation scripts from pycocotools.
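###Code
# Illustrative only (hypothetical values): a target dict in the format the spec
# above describes, for a single image with one box.
import torch
example_target = {
"boxes": torch.tensor([[10., 20., 110., 220.]]),  # [x0, y0, x1, y1]
"labels": torch.tensor([1]),
"image_id": torch.tensor([0]),
"area": torch.tensor([(110. - 10.) * (220. - 20.)]),
"iscrowd": torch.tensor([0]),
}
print(example_target["area"])  # tensor([20000.])
###Output
_____no_output_____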
###Code
!if [ ! -d 'PennFudanPed' ]; then \
wget https://www.cis.upenn.edu/~jshi/ped_html/PennFudanPed.zip .; \
unzip PennFudanPed.zip; \
fi
!tree -L 1 PennFudanPed
from PIL import Image
Image.open('PennFudanPed/PNGImages/FudanPed00001.png')
mask = Image.open('PennFudanPed/PedMasks/FudanPed00001_mask.png')
# each mask instance has a different color, from zero to N, where
# N is the number of instances. In order to make visualization easier,
# let's add a color palette to the mask.
mask.putpalette([
0, 0, 0, # black background
255, 0, 0, # index 1 is red
255, 255, 0, # index 2 is yellow
255, 153, 0, # index 3 is orange
])
mask
###Output
_____no_output_____
###Markdown
Let's write a `torch.utils.data.Dataset` class for this dataset.
###Code
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image
class PennFudanDataset(torch.utils.data.Dataset):
def __init__(self, root, transforms=None):
self.root = root
self.transforms = transforms
# load all image files, sorting them to
# ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))
self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))
def __getitem__(self, idx):
# load images and masks
img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])
img = Image.open(img_path).convert("RGB")
# note that we haven't converted the mask to RGB,
# because each color corresponds to a different instance
# with 0 being background
mask = Image.open(mask_path)
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set
# of binary masks
masks = mask == obj_ids[:, None, None]
# get bounding box coordinates for each mask
num_objs = len(obj_ids)
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
boxes.append([xmin, ymin, xmax, ymax])
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.imgs)
dataset = PennFudanDataset('PennFudanPed/')
dataset[0]
###Output
_____no_output_____
###Markdown
Defining your model. In this tutorial, we will be using [Mask R-CNN](https://arxiv.org/abs/1703.06870), which is based on top of [Faster R-CNN](https://arxiv.org/abs/1506.01497). Faster R-CNN is a model that predicts both bounding boxes and class scores for potential objects in the image. Mask R-CNN adds an extra branch into Faster R-CNN, which also predicts segmentation masks for each instance. There are two common situations where one might want to modify one of the available models in the torchvision model zoo. The first is when we want to start from a pre-trained model, and just finetune the last layer. The other is when we want to replace the backbone of the model with a different one (for faster predictions, for example).

1 - Finetuning from a pretrained model. Let's suppose that you want to start from a model pre-trained on COCO and want to finetune it for your particular classes. Here is a possible way of doing it:

```python
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

# load a model pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)

# replace the classifier with a new one, that has num_classes which is user-defined
num_classes = 2  # 1 class (person) + background
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
```

2 - Modifying the model to add a different backbone. Another common situation arises when the user wants to replace the backbone of a detection model with a different one. For example, the current default backbone (ResNet-50) might be too big for some applications, and smaller models might be necessary. Here is how we would go about leveraging the functions provided by torchvision to modify a backbone.

```python
import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator

# load a pre-trained model for classification and return only the features
backbone = torchvision.models.mobilenet_v2(pretrained=True).features
# FasterRCNN needs to know the number of output channels in a backbone.
# For mobilenet_v2, it's 1280 so we need to add it here
backbone.out_channels = 1280

# let's make the RPN generate 5 x 3 anchors per spatial location, with 5
# different sizes and 3 different aspect ratios. We have a Tuple[Tuple[int]]
# because each feature map could potentially have different sizes and aspect ratios
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
                                   aspect_ratios=((0.5, 1.0, 2.0),))

# let's define what are the feature maps that we will use to perform the
# region of interest cropping, as well as the size of the crop after rescaling.
# if your backbone returns a Tensor, featmap_names is expected to be [0].
# More generally, the backbone should return an OrderedDict[Tensor], and
# in featmap_names you can choose which feature maps to use.
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
                                                output_size=7,
                                                sampling_ratio=2)

# put the pieces together inside a FasterRCNN model
model = FasterRCNN(backbone,
                   num_classes=2,
                   rpn_anchor_generator=anchor_generator,
                   box_roi_pool=roi_pooler)
```

An Instance segmentation model for the PennFudan Dataset. In our case, we want to fine-tune from a pre-trained model, given that our dataset is very small.
So we will be following approach number 1.Here we want to also compute the instance segmentation masks, so we will be using Mask R-CNN:
###Code
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
def get_instance_segmentation_model(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
###Output
_____no_output_____
###Markdown
That's it, this will make the model ready to be trained and evaluated on our custom dataset.

Training and evaluation functions

In `references/detection/`, we have a number of helper functions to simplify training and evaluating detection models. Here, we will use `references/detection/engine.py`, `references/detection/utils.py` and `references/detection/transforms.py`.

```bash
git clone https://github.com/pytorch/vision.git
cd vision
git checkout v0.3.0
cp references/detection/utils.py ../
cp references/detection/transforms.py ../
cp references/detection/coco_eval.py ../
cp references/detection/engine.py ../
cp references/detection/coco_utils.py ../
```
###Code
!ls *.py
###Output
coco_eval.py coco_utils.py engine.py transforms.py utils.py
###Markdown
Let's write some helper functions for data augmentation / transformation, which leverage the functions in `references/detection` that we have just copied:
###Code
from engine import train_one_epoch, evaluate
import utils
import transforms as T
def get_transform(train):
transforms = []
# converts the image, a PIL image, into a PyTorch Tensor
transforms.append(T.ToTensor())
if train:
# during training, randomly flip the training images
# and ground-truth for data augmentation
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
###Output
_____no_output_____
###Markdown
> Note that we do not need to add mean/std normalization or image rescaling in the data transforms, as those are handled internally by the Mask R-CNN model. Putting everything togetherWe now have the dataset class, the models and the data transforms. Let's instantiate them.
###Code
# use our dataset and defined transformations
dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))
# split the dataset in train and test set
torch.manual_seed(1)
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-50])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=2, shuffle=True, num_workers=4,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, shuffle=False, num_workers=4,
collate_fn=utils.collate_fn)
###Output
_____no_output_____
###Markdown
Now let's instantiate the model and the optimizer
###Code
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# our dataset has two classes only - background and person
num_classes = 2
# get the model using our helper function
model = get_instance_segmentation_model(num_classes)
# move model to the right device
model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005,
momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=3,
gamma=0.1)
###Output
_____no_output_____
###Markdown
And now let's train the model for 10 epochs, evaluating at the end of every epoch.
###Code
# let's train it for 10 epochs
num_epochs = 10
for epoch in range(num_epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, device=device)
###Output
/home/john/anaconda3/envs/pytorch/lib/python3.8/site-packages/torch/nn/functional.py:3103: UserWarning: The default behavior for interpolate/upsample with float scale_factor changed in 1.6.0 to align with other frameworks/libraries, and now uses scale_factor directly, instead of relying on the computed output size. If you wish to restore the old behavior, please set recompute_scale_factor=True. See the documentation of nn.Upsample for details.
warnings.warn("The default behavior for interpolate/upsample with float scale_factor changed "
###Markdown
Now that training has finished, let's have a look at what it actually predicts in a test image
###Code
# pick one image from the test set
img, _ = dataset_test[0]
# put the model in evaluation mode
model.eval()
with torch.no_grad():
prediction = model([img.to(device)])
###Output
_____no_output_____
###Markdown
Printing the prediction shows that we have a list of dictionaries. Each element of the list corresponds to a different image. As we have a single image, there is a single dictionary in the list.The dictionary contains the predictions for the image we passed. In this case, we can see that it contains `boxes`, `labels`, `masks` and `scores` as fields.
###Code
prediction
###Output
_____no_output_____
###Markdown
Let's inspect the image and the predicted segmentation masks.For that, we need to convert the image back for display: it has been rescaled to 0-1 and had its channels flipped so that it is in `[C, H, W]` format.
###Code
Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())
###Output
_____no_output_____
###Markdown
And let's now visualize the top predicted segmentation mask. The masks are predicted as `[N, 1, H, W]`, where `N` is the number of predictions, and are probability maps between 0-1.
###Code
Image.fromarray(prediction[0]['masks'][0, 0].mul(255).byte().cpu().numpy())
###Output
_____no_output_____ |
doc/gallery/cell-tag.ipynb | ###Markdown
This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/. Using a Cell Tag to Select a Thumbnail: You can select any code cell (with appropriate output) by tagging it with the `nbsphinx-thumbnail` tag. If there are multiple outputs in the selected cell, the last one is used. See [Choosing from Multiple Outputs](multiple-outputs.ipynb) for how to select a specific output. If you want to show a tooltip, have a look at [Using Cell Metadata to Select a Thumbnail](cell-metadata.ipynb).
###Code
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
The following cell has the `nbsphinx-thumbnail` tag:
###Code
fig, ax = plt.subplots(figsize=[6, 3])
ax.plot([4, 9, 7, 20, 6, 33, 13, 23, 16, 62, 8])
###Output
_____no_output_____ |
apache_beam_05_Combiner.ipynb | ###Markdown
CombineCombine is a Beam transform for combining collections of elements or values in your data. Combine has variants that work on entire PCollections, and some that combine the values for each key in PCollections of key/value pairs.When you apply a Combine transform, you must provide the function that contains the logic for combining the elements or values. The combining function should be **commutative** and **associative**, as the function is not necessarily invoked exactly once on all values with a given key. Because the input data (including the value collection) may be distributed across multiple workers, the combining function might be called multiple times to perform partial combining on subsets of the value collection. The Beam SDK also provides some pre-built combine functions for common numeric combination operations such as sum, min, and max.
###Code
import apache_beam as beam
class PercentagesFn(beam.CombineFn):
def create_accumulator(self):
return {}
def add_input(self, accumulator, input):
# accumulator == {}
# input == '🥕'
if input not in accumulator:
accumulator[input] = 0 # {'🥕': 0}
accumulator[input] += 1 # {'🥕': 1}
return accumulator
def merge_accumulators(self, accumulators):
# accumulators == [
# {'🥕': 1, '🍅': 2},
# {'🥕': 1, '🍅': 1, '🍆': 1},
# {'🥕': 1, '🍅': 3},
# ]
merged = {}
for accum in accumulators:
for item, count in accum.items():
if item not in merged:
merged[item] = 0
merged[item] += count
# merged == {'🥕': 3, '🍅': 6, '🍆': 1}
return merged
def extract_output(self, accumulator):
# accumulator == {'🥕': 3, '🍅': 6, '🍆': 1}
total = sum(accumulator.values()) # 10
percentages = {item: count / total for item, count in accumulator.items()}
# percentages == {'🥕': 0.3, '🍅': 0.6, '🍆': 0.1}
return percentages
with beam.Pipeline() as pipeline:
percentages = (
pipeline
| 'Create produce' >> beam.Create(['🥕', '🍅', '🍅', '🥕', '🍆', '🍅', '🍅', '🍅', '🥕', '🍅'])
| 'Get percentages' >> beam.CombineGlobally(PercentagesFn())
| beam.Map(print))
import apache_beam as beam
class AverageFn(beam.CombineFn):
def create_accumulator(self):
return (0.0, 0) # initialize (sum, count)
def add_input(self, sum_count, input):
(sum, count) = sum_count
return sum + input, count + 1
def merge_accumulators(self, accumulators):
ind_sums, ind_counts = zip(*accumulators) # zip - [(27, 3), (39, 3), (18, 2)] --> [(27,39,18), (3,3,2)]
return sum(ind_sums), sum(ind_counts) # (84,8)
def extract_output(self, sum_count):
(sum, count) = sum_count # combine globally using CombineFn
return sum / count if count else float('NaN')
with beam.Pipeline() as pipeline:
percentages = (
pipeline
| beam.Create([15,5,7,7,9,23,13,5])
| "Combine Globally" >> beam.CombineGlobally(AverageFn())
# | 'Write results' >> beam.io.WriteToText('output/combine')
| beam.Map(print)
)
###Output
10.5
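###Markdown
The cells above only use `CombineGlobally`. The per-key variant mentioned in the introduction works the same way on key/value pairs; below is a small sketch added for illustration (not part of the original notebook), using the pre-built `sum` combiner:
###Code
import apache_beam as beam

with beam.Pipeline() as pipeline:
    totals_per_key = (
        pipeline
        | beam.Create([('🥕', 3), ('🍅', 1), ('🥕', 2), ('🍅', 4)])
        | 'Sum per key' >> beam.CombinePerKey(sum) # pre-built combiner applied to each key's values
        | beam.Map(print)) # ('🥕', 5), ('🍅', 5)
###Output
_____no_output_____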
|
OpenMM_toysystems/lennard_jones_toysystem.ipynb | ###Markdown
InfoArgon Lennard-Jones system Targets* create a toy system with Python * create a `system` * create a `force` * create a `topology`
###Code
from simtk import openmm as mm
from simtk.openmm import app
from simtk.unit import *
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Input Let's define an argon system. InteractionsFirst, we need to define some general parameters like `mass`, `charge` and the Lennard-Jones parameters `sigma` and `epsilon`.
###Code
mass = 39.9 * amu
charge = 0.0 * elementary_charge
sigma = 3.4 * angstroms
epsilon = 0.238 * kilocalories_per_mole
###Output
_____no_output_____
###Markdown
System parametersSecondly, we need to define the basics of our system, namely the volume via `box_size` and the number of particles `n_particles`.
###Code
n_particles = 512
box_size = 50 * angstrom
###Output
_____no_output_____
###Markdown
Build the simulationLet's build our `Simulation` object.For this we need a `system`, an `integrator` and a `topology`. Create the systemThe first step in defining a `system` is to define particles with a given `mass` in the system.We can add these with `system.addParticle(mass)`.
###Code
# Create a system and add particles to it
system = mm.System()
for index in range(n_particles):
# Particles are added one at a time
# Their indices in the System will correspond with their indices in the Force objects we will add later
system.addParticle(mass)
###Output
_____no_output_____
###Markdown
Create the boxIf we want our simulation to run in a periodic box (rather than a non-periodic one), we need to define the box itself.This is done by defining the `box_vectors` spanning the box.
###Code
box_vectors = np.diag([box_size/angstrom for i in range(3)])*angstrom
system.setDefaultPeriodicBoxVectors(*box_vectors)
###Output
_____no_output_____
###Markdown
Define interactionsIf we do not want to simulate an ideal gas, we need some interactions in our system.We can add non-bonded interactions by using `openmm.NonbondedForce()`. This will create a `force` with Lennard-Jones **and** Coulomb interactions.We have to add **all** particles to the force. After we have defined the other parameters, we can add this force to our system with `system.addForce(force)`.
###Code
# Add Lennard-Jones interactions using a NonbondedForce
force = mm.NonbondedForce()
force.setNonbondedMethod(mm.NonbondedForce.CutoffPeriodic)
# all particles must have parameters assigned for the NonbondedForce
for index in range(n_particles):
# Particles are assigned properties in the same order as they appear in the System object
force.addParticle(charge, sigma, epsilon)
force.setCutoffDistance(3.0 * sigma) # set cutoff (truncation) distance at 3*sigma
force.setUseSwitchingFunction(True) # use a smooth switching function to avoid force discontinuities at cutoff
force.setSwitchingDistance(2.5 * sigma) # turn on switch at 2.5*sigma
force.setUseDispersionCorrection(True) # use long-range isotropic dispersion correction
force_index = system.addForce(force) # system takes ownership of the NonbondedForce object
###Output
_____no_output_____
###Markdown
create an integratorAs we want to do a simulation and update our positions and velocities, we need to add an `integrator` to our simulation.In this case, we will use the **leap-frog integrator** via `openmm.VerletIntegrator(timestep)`.
###Code
# Create an integrator
timestep = 1.0 * femtoseconds
integrator = mm.VerletIntegrator(timestep)
###Output
_____no_output_____
###Markdown
Create the topology

At this point we could just create a `context` via

```python
context = openmm.Context(system, integrator, platform)
```

but since we want to use the `Simulation` object (for all the nice features it comes with) we first have to create a `Topology`. It needs a `chain` and a `residue` to add an atom to it. The following code will create 1 `chain` and add all atoms as individual `residues` to the `Topology`.
###Code
top = app.Topology()
chain = top.addChain()
for i in range(n_particles):
residue = top.addResidue(name='Ar', chain=chain, id=i)
top.addAtom('Ar',element=app.Element.getBySymbol('Ar') , residue=residue)
top.setPeriodicBoxVectors(box_vectors)
print(top)
###Output
_____no_output_____
###Markdown
create the simulationNow we have all the ingredients to create a `Simulation` object.Note: the `integrator` is bound to the `Simulation` object and can **not** be reused in another object. Hence, it has to be recreated.
###Code
simulation = app.Simulation(topology=top, system=system, integrator=integrator)
###Output
_____no_output_____
###Markdown
Define starting positions of the Lennard-Jones particlesOf course we need positions to describe our system. As we have no idea what a gas looks like, we create random positions using `np.random.rand(n_particles, 3)` to get a uniform distribution of particles.Note: we have to scale them to the box size, as we get numbers between `0` and `1`.We can then set the coordinates of our simulation using `simulation.context.setPositions(positions)`.
###Code
positions = box_size * np.random.rand(n_particles, 3)
positions
simulation.context.setPositions(positions)
###Output
_____no_output_____
###Markdown
Run the simulationNow we are ready to run a simulation.But there are a few things which have to be done first:* minimize the system* write output* set velocities MinimizeWe have to minimize the system to avoid an overlap of atoms.
###Code
simulation.minimizeEnergy()
###Output
_____no_output_____
###Markdown
Add reporters for the outputWe have to add `reporters` to the `simulation` to generate output, otherwise we can simulate the system but won't be able to follow it. You can easily add `reporters` by `append`ing them to the list `simulation.reporters`.
###Code
simulation.reporters.append(app.StateDataReporter('thermo.argon.csv', 100,
step=True,
potentialEnergy=True,
kineticEnergy=True,
totalEnergy=True,
temperature=True,
))
simulation.reporters.append(app.DCDReporter("trajectory.argon.dcd", 100, enforcePeriodicBox=True))
###Output
_____no_output_____
###Markdown
set velocitiesWe want to set a starting temperature for our simulation. We can do this by either using `simulation.context.setVelocities(velocities)` or `simulation.context.setVelocitiesToTemperature(temperature)`. The first uses an `(n_particles, 3)` array and sets these velocities; the latter creates velocities for a given `temperature`.
###Code
simulation.context.setVelocitiesToTemperature(300 * kelvin)
###Output
_____no_output_____
###Markdown
Run the simulationNow, we can finally run the simulation by using `simulation.step(n_steps)` to run it for `n_steps`.
###Code
simulation.step(10000)
###Output
_____no_output_____
###Markdown
Visualize the simulation

This part is no longer about the simulation itself but just about a quick visualization in the Jupyter notebook. Of course you can also use your favorite viewer to have a look at the trajectory, e.g. for VMD:

```tcl
vmd > set n_particles 512
vmd > mol new atoms ${n_particles}
vmd > mol representation VDW 1.0 12
vmd > mol addrep top
vmd > mol addfile trajectory.argon.dcd waitfor all
```

Load `mdtraj` and `nglview` to load the trajectory and render it.
###Code
import mdtraj
import nglview
###Output
_____no_output_____
###Markdown
Load the trajectory.Note: we can use `mdtraj.Topology.from_openmm(simulation.topology)` to directly get the trajectory from the OpenMM `simulation` object.
###Code
mdtraj_topology = mdtraj.Topology.from_openmm(simulation.topology)
traj = mdtraj.load_dcd('trajectory.argon.dcd', mdtraj_topology)
###Output
_____no_output_____
###Markdown
Visualize it.
###Code
view = nglview.show_mdtraj(traj) # gui=True for more options
# view.add_ball_and_stick('all') # smaller balls
view.add_representation("spacefill")
view.add_unitcell()
# update camera type
view.camera = 'orthographic'
view.center()
view
###Output
_____no_output_____
###Markdown
Analysis of thermodynamics Let's have a first look into our `csv` file.
###Code
# print the first line of the file
print(open('thermo.argon.csv', 'r').readline())
###Output
_____no_output_____
###Markdown
As you can see, the properties are listed with the corresponding unit.You can easily load a `csv` file using `np.genfromtxt(filename, delimiter=',')`.A good practice for handling these files is to use the `names=True` argument.This will assign the column names to the fields of the numpy array.Now, you can no longer use indices like `data[0]` for the first entry, but have to use `data['Potential_Energy_kJmole']` as in dictionaries. This may seem cumbersome at the beginning, but as the order of columns changes depending on what you save into them, it is the best way to ensure you read the correct columns.It also improves readability of the code!
###Code
csv = np.genfromtxt('thermo.argon.csv', delimiter=',', names=True)
print("Fields : {}".format(csv.dtype.names))
###Output
_____no_output_____
###Markdown
Let's plot something.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.title('Potential Energy')
plt.plot(csv['Step'], csv['Potential_Energy_kJmole'])
plt.xlabel('Step')
plt.ylabel('Potential Energy [kj/mole]')
plt.title('Temperature')
plt.plot(csv['Step'], csv['Temperature_K'])
plt.xlabel('Step')
plt.ylabel('Temperature [K]')
###Output
_____no_output_____ |
course_2/course_material/Part_7_Deep_Learning/S54_L390/6. TensorFlow_MNIST_Batch_size_Part_1_Solution.ipynb | ###Markdown
Exercises 6. Adjust the batch size. Try a batch size of 1000. How does the required time change? What about the accuracy?**Solution**Find the line that declares the batch size. Change batch_size from 100 to 1000. batch_size = 1000 A bigger batch size results in slower training. That's what we expected from the theory. We are taking advantage of batching because of the amazing speed increase.Notice that the validation accuracy starts from a low number. That's because there are **fewer** updates in a single epoch.*Try a batch size of 30,000 or 50,000. That's very close to single batch GD for this problem. What do you think about the speed? You will need to change the max epochs to 100 (for instance), as 15 epochs won't be enough to train the model. What do you think about the speed of optimization?* Deep Neural Network for MNIST ClassificationWe'll apply all the knowledge from the lectures in this section to write a deep neural network. The problem we've chosen is referred to as the "Hello World" for machine learning because for most students it is their first example. The dataset is called MNIST and refers to handwritten digit recognition. You can find more about it on Yann LeCun's website (Director of AI Research, Facebook). He is one of the pioneers of what we've been talking about and of more complex approaches that are widely used today, such as convolutional networks. The dataset provides 28x28 images of handwritten digits (1 per image) and the goal is to write an algorithm that detects which digit is written. Since there are only 10 digits, this is a classification problem with 10 classes. In order to exemplify what we've talked about in this section, we will build a network with 2 hidden layers between inputs and outputs. Import the relevant packages
###Code
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# TensorFLow includes a data provider for MNIST that we'll use.
# This function automatically downloads the MNIST dataset to the chosen directory.
# The dataset is already split into training, validation, and test subsets.
# Furthermore, it preprocess it into a particularly simple and useful format.
# Every 28x28 image is flattened into a vector of length 28x28=784, where every value
# corresponds to the intensity of the color of the corresponding pixel.
# The samples are grayscale (but standardized from 0 to 1), so a value close to 0 is almost white and a value close to
# 1 is almost purely black. This representation (flattening the image row by row into
# a vector) is slightly naive but as you'll see it works surprisingly well.
# Since this is a classification problem, our targets are categorical.
# Recall from the lecture on that topic that one way to deal with that is to use one-hot encoding.
# With it, the target for each individual sample is a vector of length 10
# which has nine 0s and a single 1 at the position which corresponds to the correct answer.
# For instance, if the true answer is "3", the target will be [0,0,0,1,0,0,0,0,0,0] (counting from 0).
# Have in mind that the very first time you execute this command it might take a little while to run
# because it has to download the whole dataset. Following commands only extract it so they're faster.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
###Output
_____no_output_____
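###Markdown
As a quick illustration (added here, not part of the original solution), here is what one-hot encoding looks like in plain `numpy`:
###Code
import numpy as np

label = 3
print(np.eye(10)[label]) # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
###Output
_____no_output_____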
###Markdown
Outline the modelThe whole code is in one cell, so you can simply rerun this cell (instead of the whole notebook) and train a new model.The tf.reset_default_graph() function takes care of clearing the old parameters. From there on, a completely new training starts.
###Code
input_size = 784
output_size = 10
# Use same hidden layer size for both hidden layers. Not a necessity.
hidden_layer_size = 50
# Reset any variables left in memory from previous runs.
tf.reset_default_graph()
# As in the previous example - declare placeholders where the data will be fed into.
inputs = tf.placeholder(tf.float32, [None, input_size])
targets = tf.placeholder(tf.float32, [None, output_size])
# Weights and biases for the first linear combination between the inputs and the first hidden layer.
# Use get_variable in order to make use of the default TensorFlow initializer which is Xavier.
weights_1 = tf.get_variable("weights_1", [input_size, hidden_layer_size])
biases_1 = tf.get_variable("biases_1", [hidden_layer_size])
# Operation between the inputs and the first hidden layer.
# We've chosen ReLu as our activation function. You can try playing with different non-linearities.
outputs_1 = tf.nn.relu(tf.matmul(inputs, weights_1) + biases_1)
# Weights and biases for the second linear combination.
# This is between the first and second hidden layers.
weights_2 = tf.get_variable("weights_2", [hidden_layer_size, hidden_layer_size])
biases_2 = tf.get_variable("biases_2", [hidden_layer_size])
# Operation between the first and the second hidden layers. Again, we use ReLu.
outputs_2 = tf.nn.relu(tf.matmul(outputs_1, weights_2) + biases_2)
# Weights and biases for the final linear combination.
# That's between the second hidden layer and the output layer.
weights_3 = tf.get_variable("weights_3", [hidden_layer_size, output_size])
biases_3 = tf.get_variable("biases_3", [output_size])
# Operation between the second hidden layer and the final output.
# Notice we have not used an activation function because we'll use the trick to include it directly in
# the loss function. This works for softmax and sigmoid with cross entropy.
outputs = tf.matmul(outputs_2, weights_3) + biases_3
# Calculate the loss function for every output/target pair.
# The function used is the same as applying softmax to the last layer and then calculating cross entropy
# with the function we've seen in the lectures. This function, however, combines them in a clever way,
# which makes it both faster and more numerically stable (when dealing with very small numbers).
# Logits here means: unscaled probabilities (so, the outputs, before they are scaled by the softmax)
# Naturally, the labels are the targets.
loss = tf.nn.softmax_cross_entropy_with_logits(logits=outputs, labels=targets)
# Get the average loss
mean_loss = tf.reduce_mean(loss)
# Define the optimization step. Using adaptive optimizers such as Adam in TensorFlow
# is as simple as that.
optimize = tf.train.AdamOptimizer(learning_rate=0.001).minimize(mean_loss)
# Get a 0 or 1 for every input in the batch indicating whether it output the correct answer out of the 10.
out_equals_target = tf.equal(tf.argmax(outputs, 1), tf.argmax(targets, 1))
# Get the average accuracy of the outputs.
accuracy = tf.reduce_mean(tf.cast(out_equals_target, tf.float32))
# Declare the session variable.
sess = tf.InteractiveSession()
# Initialize the variables. Default initializer is Xavier.
initializer = tf.global_variables_initializer()
sess.run(initializer)
# Batching
batch_size = 1000
# Calculate the number of batches per epoch for the training set.
batches_number = mnist.train._num_examples // batch_size
# Basic early stopping. Set a maximum number of epochs.
max_epochs = 15
# Keep track of the validation loss of the previous epoch.
# If the validation loss becomes increasing, we want to trigger early stopping.
# We initially set it at some arbitrarily high number to make sure we don't trigger it
# at the first epoch
prev_validation_loss = 9999999.
import time
start_time = time.time()
# Create a loop for the epochs. Epoch_counter is a variable which automatically starts from 0.
for epoch_counter in range(max_epochs):
# Keep track of the sum of batch losses in the epoch.
curr_epoch_loss = 0.
# Iterate over the batches in this epoch.
for batch_counter in range(batches_number):
# Input batch and target batch are assigned values from the train dataset, given a batch size
input_batch, target_batch = mnist.train.next_batch(batch_size)
# Run the optimization step and get the mean loss for this batch.
# Feed it with the inputs and the targets we just got from the train dataset
_, batch_loss = sess.run([optimize, mean_loss],
feed_dict={inputs: input_batch, targets: target_batch})
# Increment the sum of batch losses.
curr_epoch_loss += batch_loss
# So far curr_epoch_loss contained the sum of all batches inside the epoch
# We want to find the average batch losses over the whole epoch
# The average batch loss is a good proxy for the current epoch loss
curr_epoch_loss /= batches_number
# At the end of each epoch, get the validation loss and accuracy
# Get the input batch and the target batch from the validation dataset
input_batch, target_batch = mnist.validation.next_batch(mnist.validation._num_examples)
# Run without the optimization step (simply forward propagate)
validation_loss, validation_accuracy = sess.run([mean_loss, accuracy],
feed_dict={inputs: input_batch, targets: target_batch})
# Print statistics for the current epoch
# Epoch counter + 1, because epoch_counter automatically starts from 0, instead of 1
# We format the losses with 3 digits after the dot
# We format the accuracy in percentages for easier interpretation
print('Epoch '+str(epoch_counter+1)+
'. Mean loss: '+'{0:.3f}'.format(curr_epoch_loss)+
'. Validation loss: '+'{0:.3f}'.format(validation_loss)+
'. Validation accuracy: '+'{0:.2f}'.format(validation_accuracy * 100.)+'%')
# Trigger early stopping if validation loss begins increasing.
if validation_loss > prev_validation_loss:
break
# Store this epoch's validation loss to be used as previous validation loss in the next iteration.
prev_validation_loss = validation_loss
# Not essential, but it is nice to know when the algorithm stopped working in the output section, rather than check the kernel
print('End of training.')
#Add the time it took the algorithm to train
print("Training time: %s seconds" % (time.time() - start_time))
###Output
_____no_output_____
###Markdown
Test the modelAs we discussed in the lectures, after training on the training and validation sets, we test the final prediction power of our model by running it on the test dataset that the algorithm has not seen before.It is very important to realize that fiddling with the hyperparameters overfits the validation dataset. The test is the absolute final instance. You should not test before you are completely done with adjusting your model.
###Code
input_batch, target_batch = mnist.test.next_batch(mnist.test._num_examples)
test_accuracy = sess.run([accuracy],
feed_dict={inputs: input_batch, targets: target_batch})
# Test accuracy is a list with 1 value, so we want to extract the value from it, using x[0]
# Uncomment the print to see how it looks before the manipulation
# print (test_accuracy)
test_accuracy_percent = test_accuracy[0] * 100.
# Print the test accuracy formatted in percentages
print('Test accuracy: '+'{0:.2f}'.format(test_accuracy_percent)+'%')
###Output
_____no_output_____ |
2022/Python-Tricks-Book/Chapter4-Classes-OOP/cloning-objects.ipynb | ###Markdown
Cloning Objects for Fun and Profit Making Shallow copies
###Code
xs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
ys = list(xs) # Make A Shallow Copy
print(xs)
print(ys)
xs[1][0] = '?'
print(xs)
print(ys)
###Output
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[[1, 2, 3], ['?', 5, 6], [7, 8, 9]]
[[1, 2, 3], ['?', 5, 6], [7, 8, 9]]
###Markdown
Making Deep copies
###Code
import copy
xs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
zs = copy.deepcopy(xs)
print(xs)
print(zs)
xs[1][0] = 'X'
print(xs)
print(zs)
###Output
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
[[1, 2, 3], ['X', 5, 6], [7, 8, 9]]
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]

PY0101EN_2_3_Sets.ipynb | ###Markdown
Sets in Python Welcome! This notebook will teach you about the sets in the Python Programming Language. By the end of this lab, you'll know the basics set operations in Python, including what it is, operations and logic operations. Table of Contents Sets Set Content Set Operations Sets Logic Operations Quiz on Sets Estimated time needed: 20 min Sets Set Content A set is a unique collection of objects in Python. You can denote a set with a curly bracket {}. Python will automatically remove duplicate items:
###Code
# Create a set
set1 = {"pop", "rock", "soul", "hard rock", "rock", "R&B", "rock", "disco"}
set1
###Output
_____no_output_____
###Markdown
The process of mapping is illustrated in the figure: You can also create a set from a list as follows:
###Code
# Convert list to set
album_list = [ "Michael Jackson", "Thriller", 1982, "00:42:19", \
"Pop, Rock, R&B", 46.0, 65, "30-Nov-82", None, 10.0]
album_set = set(album_list)
album_set
###Output
_____no_output_____
###Markdown
Now let us create a set of genres:
###Code
# Convert list to set
music_genres = set(["pop", "pop", "rock", "folk rock", "hard rock", "soul", \
"progressive rock", "soft rock", "R&B", "disco"])
music_genres
###Output
_____no_output_____
###Markdown
Set Operations Let us go over set operations, as these can be used to change the set. Consider the set A:
###Code
# Sample set
A = set(["Thriller", "Back in Black", "AC/DC"])
A
###Output
_____no_output_____
###Markdown
We can add an element to a set using the add() method:
###Code
# Add element to set
A.add("NSYNC")
A
###Output
_____no_output_____
###Markdown
If we add the same element twice, nothing will happen as there can be no duplicates in a set:
###Code
# Try to add duplicate element to the set
A.add("NSYNC")
A
###Output
_____no_output_____
###Markdown
We can remove an item from a set using the remove method:
###Code
# Remove the element from set
A.remove("NSYNC")
A
###Output
_____no_output_____
###Markdown
We can verify if an element is in the set using the in command:
###Code
# Verify if the element is in the set
"AC/DC" in A
###Output
_____no_output_____
###Markdown
Sets Logic Operations Remember that with sets you can check the difference between sets, as well as the symmetric difference, intersection, and union: Consider the following two sets:
###Code
# Sample Sets
album_set1 = set(["Thriller", 'AC/DC', 'Back in Black'])
album_set2 = set([ "AC/DC", "Back in Black", "The Dark Side of the Moon"])
###Output
_____no_output_____
###Markdown
###Code
# Print two sets
album_set1, album_set2
###Output
_____no_output_____
###Markdown
As both sets contain AC/DC and Back in Black we represent these common elements with the intersection of two circles. You can find the intersect of two sets as follow using &:
###Code
# Find the intersections
intersection = album_set1 & album_set2
intersection
###Output
_____no_output_____
###Markdown
You can find all the elements that are only contained in album_set1 using the difference method:
###Code
# Find the difference in set1 but not set2
album_set1.difference(album_set2)
###Output
_____no_output_____
###Markdown
You only need to consider elements in album_set1; all the elements in album_set2, including the intersection, are not included. The elements in album_set2 but not in album_set1 is given by:
###Code
album_set2.difference(album_set1)
###Output
_____no_output_____
###Markdown
You can also find the intersection of album_list1 and album_list2, using the intersection method:
###Code
# Use intersection method to find the intersection of album_list1 and album_list2
album_set1.intersection(album_set2)
###Output
_____no_output_____
###Markdown
This corresponds to the intersection of the two circles: The union corresponds to all the elements in both sets, which is represented by coloring both circles: The union is given by:
###Code
# Find the union of two sets
album_set1.union(album_set2)
###Output
_____no_output_____
###Markdown
And you can check if a set is a superset or subset of another set, respectively, like this:
###Code
# Check if superset
set(album_set1).issuperset(album_set2)
# Check if subset
set(album_set2).issubset(album_set1)
###Output
_____no_output_____
###Markdown
Here is an example where issubset() and issuperset() return true:
###Code
# Check if subset
set({"Back in Black", "AC/DC"}).issubset(album_set1)
# Check if superset
album_set1.issuperset({"Back in Black", "AC/DC"})
###Output
_____no_output_____
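###Markdown
The symmetric difference mentioned above is not demonstrated elsewhere in this notebook, so here is a short added example (the elements contained in exactly one of the two sets):
###Code
# Added example: symmetric difference of the two album sets
album_set1.symmetric_difference(album_set2)
###Output
_____no_output_____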
###Markdown
Quiz on Sets Convert the list ['rap','house','electronic music', 'rap'] to a set:
###Code
# Write your code below and press Shift+Enter to execute
sets = set(['rap', 'house', 'electronic music', 'rap'])
sets
###Output
_____no_output_____
###Markdown
Double-click __here__ for the solution.<!-- Your answer is below:set(['rap','house','electronic music','rap'])--> Consider the list A = [1, 2, 2, 1] and set B = set([1, 2, 2, 1]), does sum(A) = sum(B)
###Code
# Write your code below and press Shift+Enter to execute
A = [1, 2, 2, 1]
B = set([1, 2, 2, 1])
sum(A) == sum(B)  # False: sum(A) is 6, but sum(B) is 3 because the set removed duplicates
###Output
_____no_output_____
###Markdown
Double-click __here__ for the solution.<!-- Your answer is below:A = [1, 2, 2, 1] B = set([1, 2, 2, 1])print("the sum of A is:", sum(A))print("the sum of B is:", sum(B))--> Create a new set album_set3 that is the union of album_set1 and album_set2:
###Code
# Write your code below and press Shift+Enter to execute
album_set1 = set(["Thriller", 'AC/DC', 'Back in Black'])
album_set2 = set([ "AC/DC", "Back in Black", "The Dark Side of the Moon"])
album_set3 = album_set1.union(album_set2)
album_set3
###Output
_____no_output_____
###Markdown
Double-click __here__ for the solution.<!-- Your answer is below:album_set3 = album_set1.union(album_set2)album_set3--> Find out if album_set1 is a subset of album_set3:
###Code
# Write your code below and press Shift+Enter to execute
album_set1.issubset(album_set3)
###Output
_____no_output_____
dog_and_cat_classifier/src/Image classification(Dog vs Cat) from scratch.ipynb | ###Markdown
Building a model
###Code
from tensorflow import keras  # this import was missing from the original cell

img_size = (180, 180)

def build_model():
    # The original cell stopped after defining the input tensor; the layers
    # below are a minimal illustrative sketch, NOT the notebook's actual
    # architecture (which is not shown).
    inputs = keras.Input(shape=img_size + (3,), dtype='float32')
    x = keras.layers.Conv2D(32, 3, activation='relu')(inputs)
    x = keras.layers.GlobalAveragePooling2D()(x)
    outputs = keras.layers.Dense(1, activation='sigmoid')(x)  # binary output: dog vs cat
    return keras.Model(inputs, outputs)

model = build_model()
###Output
_____no_output_____
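###Markdown
The original notebook never shows how `train_ds` and `val_ds` are built; below is a minimal sketch added for illustration (the `data/train` directory with `cat/` and `dog/` subfolders is an assumption) using `image_dataset_from_directory`:
###Code
# Hypothetical data pipeline, added for illustration only.
train_ds = keras.preprocessing.image_dataset_from_directory(
    "data/train", validation_split=0.2, subset="training", seed=1337,
    image_size=img_size, batch_size=32, label_mode="binary")
val_ds = keras.preprocessing.image_dataset_from_directory(
    "data/train", validation_split=0.2, subset="validation", seed=1337,
    image_size=img_size, batch_size=32, label_mode="binary")
###Output
_____no_output_____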
###Markdown
Training
###Code
EPOCHS=50
checkpoint_filepath = './{epoch}.h5'
callbacks = keras.callbacks.ModelCheckpoint(
filepath = checkpoint_filepath,
monitor = 'val_loss',
mode = 'min',
save_best_only=True,
save_weights_only=True)
model.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy']
)
model.fit(
train_ds,
epochs=EPOCHS,
callbacks=callbacks,
validation_data=val_ds
)
###Output
_____no_output_____ |
docs/notebooks/09_plugins.ipynb | ###Markdown
Plugins

Lumerical FDTD Sparameters

gdsfactory provides you with a Lumerical FDTD interface to calculate Sparameters automatically (without you having to click around the Lumerical GUI).

The function `gdsfactory.simulation.write_sparameters_lumerical` brings up a GUI, runs the simulation and then writes the Sparameters both in .CSV and .DAT (Lumerical Interconnect / simphony) file formats, as well as the simulation settings in YAML format.

In the CSV format each Sparameter will have 2 columns: `S12m`, where `m` stands for magnitude, and `S12a`, where `a` stands for angle in radians (S11a, S11m, ...).

For the simulation to work well, your components need to have ports, which will be extended automatically to go over the PML.

The script internally calls the Lumerical Python API `lumapi`, so you will need to make sure that you can run this from Python:

```python
import lumapi

session = lumapi.FDTD()
```

On Linux that may require you to export the PYTHONPATH variable in your shell environment. You can add one line to your `.bashrc`; this line will also depend on your Lumerical version. For example, for Lumerical 2019b:

```bash
[ -d "/opt/lumerical/2019b" ] && export PATH=$PATH:/opt/lumerical/2019b/bin && export PYTHONPATH=/opt/lumerical/2019b/api/python
```

And for 2021v212:

```bash
[ -d "/opt/lumerical/v212" ] && export PATH=$PATH:/opt/lumerical/v212/api/python/bin && export PYTHONPATH=/opt/lumerical/v212/api/python
```

I recommend that you adapt these functions with your:

- simulation settings (wavelength range, mesh)
- LayerStack: GDS layer and thickness/material/zmin of each layer
- dirpath: directory path to write and read the Sparameters
###Code
import gdsfactory as gf
import gdsfactory.simulation as sim
import gdsfactory.samples.pdk.fab_c as pdk
write_sparameters_lumerical = gf.partial(
sim.write_sparameters_lumerical,
layer_stack=pdk.LAYER_STACK,
dirpath=pdk.SPARAMETERS_PATH,
)
plot_sparameters = gf.partial(
sim.plot.plot_sparameters,
dirpath=pdk.SPARAMETERS_PATH,
write_sparameters_function=write_sparameters_lumerical,
)
read_sparameters_pandas = gf.partial(
sim.read_sparameters_pandas,
layer_stack=pdk.LAYER_STACK,
dirpath=pdk.SPARAMETERS_PATH,
)
###Output
_____no_output_____
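###Markdown
Once a Sparameters CSV has been written, you can inspect the magnitude/angle columns described above directly with pandas. A small sketch (the file name below is hypothetical):
###Code
import numpy as np
import pandas as pd

# df = pd.read_csv("mzi_nc.csv")  # hypothetical file written by write_sparameters_lumerical
# s21 = df["S21m"] * np.exp(1j * df["S21a"])  # rebuild complex S21 from magnitude and angle columns
###Output
_____no_output_____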
Fremont Bridge Bicycle Counter_Time_Series.ipynb | ###Markdown
Visualization of the data
###Code
import seaborn
seaborn.set()
data.plot()
plt.ylabel('Hourly Bicycle Count')
weekly=data.resample('W').sum()
weekly.head()
weekly.plot(style=['.-','--','-'])
plt.ylabel('weekly bicycle count')
daily=data.resample('D').sum()
daily.head()
daily.plot(style=['.-','--','-'])
plt.ylabel('daily bicycle count')
daily=data.resample('D').sum()
daily.head()
daily.plot(style=['.-','--','-'])
plt.ylabel('daily bicycle count')
daily.rolling(120,center=True).std().plot(style=[':','--','-']) # 120-day centered rolling standard deviation
plt.ylabel('rolling std of daily count')
daily.rolling(120,center=True).mean().plot(style=[':','--','-']) # 120-day centered rolling mean
plt.ylabel('rolling mean of daily count')
daily.rolling(50,center=True,win_type='gaussian').sum(std=10).plot(style=[':','--','-']);
daily.rolling(150,center=True,win_type='gaussian').sum(std=50).plot(style=[':','--','-']);
###Output
_____no_output_____
###Markdown
Digging into the data
###Code
by_time=data.groupby(data.index.time).sum()
by_time.head()
import numpy as np
plt.figure(figsize=(16,9))
hourly_ticks=4*60*60*np.arange(6)
by_time.plot(xticks=hourly_ticks,style=[':','--','-'])
by_weekday=data.groupby(data.index.dayofweek).sum()
by_weekday.head()
by_weekday.index=['Mon','Tues','Wed','Thurs','Fri','Sat','Sun']
by_weekday.plot(style=[':','--','-'])
weekend=np.where(data.index.weekday<5,'weekday','weekend')
weekend[0:50]
by_time=data.groupby([weekend,data.index.time]).mean()
by_time.head()
by_time.tail()
fig,ax=plt.subplots(1,2,figsize=(16,8))
by_time.loc['weekday'].plot(ax=ax[0],title='Weekdays',xticks=hourly_ticks,style=[':','--','-'])
by_time.loc['weekend'].plot(ax=ax[1],title='Weekend',xticks=hourly_ticks,style=[':','--','-'])
###Output
_____no_output_____ |
examples/simple_notebook/simple_wire.ipynb | ###Markdown
Load some 2D shapes from simple_wire.FCStd, and then build a 3D structure out of them
###Code
back_gate = part_3d.ExtrudePart("back_gate", "Sketch", z0=-0.2, thickness=0.1)
vacuum = part_3d.ExtrudePart("vacuum", "Sketch003", z0=-0.5, thickness=1.0)
wire = part_3d.ExtrudePart("wire", "Sketch002", z0=0.0, thickness=0.1)
shell = part_3d.ExtrudePart("shell", "Sketch002", z0=0.1, thickness=0.05)
build_order = [wire, shell, back_gate, vacuum]
file_path = './simple_wire.FCStd'
geo_data = build_3d_geometry(input_parts=build_order, input_file=file_path,
xsec_dict={'central':{'axis':(1.,0.,0.),'distance':0.}})
geo_data.write_fcstd('built_geo.fcstd')
###Output
_____no_output_____
###Markdown
At this point you can try opening built_geo.fcstd with FreeCAD and taking a look at the built shape. Feel free to skip this step if you're unfamiliar with the FreeCAD GUI.We can check that our cross sections and parts are as expected:
###Code
geo_data.xsecs
geo_data.parts
###Output
_____no_output_____
###Markdown
Now we can do meshing. A mesh has already been pre built so you can just load it.
###Code
from qms.meshing import MeshPart, MeshData
# from qms.tasks import build_3d_mesh
# mesh_data = build_3d_mesh(
# geo_data,
# {
# "back_gate": MeshPart(mesh_max_size=0.1),
# "vacuum": MeshPart(mesh_max_size=0.05),
# "wire": MeshPart(mesh_max_size=0.01),
# "shell": MeshPart(mesh_max_size=0.01),
# },
# "comsol"
# )
# mesh_data.save("mesh_data.h5")
mesh_data = MeshData.load("mesh_data.h5")
###Output
_____no_output_____
###Markdown
Determine the reference level and Al work function to get a 200 meV band offset between InSb and Al:
###Code
mat_lib = Materials()
Al_WF = mat_lib['Al']['workFunction']
InSb_EA = mat_lib['InSb']['electronAffinity']
InSb_BG = mat_lib['InSb']['directBandGap']
InSb_VBO = mat_lib['InSb']['valenceBandOffset']
Al_WF_level = 0.0-(Al_WF)
InSb_CB_level = 0.0-InSb_EA+InSb_VBO
WF_shift = 200.*parse_unit('meV')-(Al_WF_level-InSb_CB_level)
new_Al_WF = (Al_WF-WF_shift)
ref_level = -new_Al_WF
mat_lib = make_materials_library({"Al":{"workFunction": new_Al_WF}})
mat_data = build_materials(geo_data,
{"back_gate": "Al", "vacuum": "air", "wire": "InSb", "shell": "Al"},
mat_lib)
###Output
_____no_output_____
###Markdown
Now we can run the electrostatic simulations. Again you can just load the results.
###Code
# from qms.tasks.thomas_fermi import ThomasFermiPart
# from qms.tasks import run_3d_thomas_fermi
# import sympy.physics.units as spu
# logging.basicConfig(level=logging.INFO)
# tf_data = run_3d_thomas_fermi(geo_data,
# mesh_data,
# mat_data,
# {
# "back_gate": ThomasFermiPart("metal_gate", boundary_condition={"voltage": 0.0 * spu.V}),
# "vacuum": ThomasFermiPart("dielectric"),
# "wire": ThomasFermiPart("semiconductor"),
# "shell": ThomasFermiPart("metal_gate", boundary_condition={"voltage": 0.0 * spu.V}),
# },
# reference_level=ref_level,
# order=1,
# )
# tf_data.save("tf_data.h5")
from qms.physics import PoissonFem
tf_data = PoissonFem.load("tf_data.h5")
###Output
_____no_output_____
###Markdown
Great! Let's take a look at the potential profile to make sure it looks reasonable:
###Code
femcoords = tf_data.coordinates
femdata = tf_data.potential
xgrid, ygrid, zgrid = np.mgrid[0:1, -0.2:0.2:0.005, -0.5:0.5:0.0125]
plot_potential = griddata(femcoords, femdata, (xgrid, ygrid, zgrid), method='linear')
plt.pcolor(ygrid[0],zgrid[0],plot_potential[0])
plt.colorbar()
plt.show()
###Output
_____no_output_____
###Markdown
This looks fine. Let's now look at a line cut:
###Code
xgrid, ygrid, zgrid = np.mgrid[0:1, 0:1, -0.2:0.2:0.002]
potential_cut = griddata(femcoords, femdata, (xgrid, ygrid, zgrid), method='linear')
plt.plot(zgrid[0,0],potential_cut[0,0])
plt.show()
###Output
_____no_output_____
###Markdown
This all looks fine. In the region between 0.0 and 0.1, we have accumulation. Let's make sure this holds up when taking into account the conduction band offset:
###Code
xgrid, ygrid, zgrid = np.mgrid[0:1, 0:1, 0:0.1:0.0005]
potential_cut = griddata(femcoords, femdata, (xgrid, ygrid, zgrid), method='linear')
plt.plot(zgrid[0,0],potential_cut[0,0])
plt.show()
###Output
_____no_output_____
###Markdown
Putting in the proper band offsets, we get:
###Code
zvec = zgrid[0,0]
potential_cut = potential_cut[0,0]
offset_CB = to_float((InSb_CB_level-ref_level) /parse_unit('meV'))/1e3
offset_VB = offset_CB-InSb_BG/parse_unit('meV')/1e3
plt.plot(zvec,offset_CB-potential_cut)
plt.plot(zvec,offset_VB-potential_cut)
plt.plot(zvec,np.zeros(zvec.shape))
plt.show()
###Output
_____no_output_____
###Markdown
Load some 2D shapes from simple_wire.FCStd, and then build a 3D structure out of them
###Code
back_gate = Part3DData('back_gate','Sketch','extrude',
domain_type='metal_gate',material='Al',
z0=-0.2,thickness=0.1,boundary_condition={'voltage':0.0},mesh_max_size=0.1)
vacuum = Part3DData('vacuum','Sketch003','extrude',
domain_type='dielectric',material='air',
z0=-0.5,thickness=1.0,mesh_max_size=0.05)
wire = Part3DData('wire','Sketch002','extrude',
domain_type='semiconductor',material='InSb',
z0 = 0.0,thickness=0.1,mesh_max_size=0.01)
shell = Part3DData('shell','Sketch002','extrude',
domain_type='metal_gate',material='Al',
z0 = 0.1,thickness=0.05,mesh_max_size=0.01,boundary_condition={'voltage':0.0})
build_order = [wire,shell,back_gate,vacuum]
file_path = './simple_wire.FCStd'
geo_data = build_3d_geometry(input_parts=build_order,input_file=file_path,
xsec_dict={'central':{'axis':(1.,0.,0.),'distance':0.}})
geo_data.write_fcstd('built_geo.fcstd')
###Output
_____no_output_____
###Markdown
At this point you can try opening built_geo.fcstd with FreeCAD and taking a look at the built shape. Feel free to skip this step if you're unfamiliar with the FreeCAD GUI.We can check that our cross sections and parts are as expected:
###Code
geo_data.xsecs
geo_data.parts
###Output
_____no_output_____
###Markdown
QMT can't do meshing, so we load a mesh from an example file
###Code
with open('mesh_data.pkl','rb') as file:
mesh_data = pickle.load(file)
###Output
_____no_output_____
###Markdown
Determine the reference level and Al work function to get a 100 meV band offset between InSb and Al:
###Code
mat_lib = Materials()
Al_WF = mat_lib['Al']['workFunction']
InSb_EA = mat_lib['InSb']['electronAffinity']
InSb_BG = mat_lib['InSb']['directBandGap']
InSb_VBO = mat_lib['InSb']['valenceBandOffset']
Al_WF_level = 0.0-(Al_WF)
InSb_CB_level = 0.0-InSb_EA+InSb_VBO
WF_shift = 100.*parse_unit('meV')-(Al_WF_level-InSb_CB_level)
new_Al_WF = (Al_WF-WF_shift)
ref_level = -new_Al_WF/parse_unit('meV')
###Output
_____no_output_____
###Markdown
You won't be able to run the simulation with QMT alone. Here we just load the file with simulation results
###Code
# logging.basicConfig(level=logging.INFO)
# tf_data = run_3d_thomas_fermi(mesh_data,
# reference_level=ref_level,
# material_properties={'Al':{'workFunction':new_Al_WF}},
# eunit='meV',volume_charge_integrals=['wire'],order=1)
with open('tf_data.pkl','rb') as file:
tf_data = pickle.load(file)
###Output
_____no_output_____
###Markdown
First, let's check that all of the boundary conditions are respected:
###Code
mesh_data.mesh_id_dict
node_vals = []
for i in range(len(mesh_data.mesh_regions)):
region_id = mesh_data.mesh_regions[i]
if region_id == 2 or region_id==4:
for node_idx in mesh_data.mesh_tets[i]:
node_vals += [tf_data.potential[node_idx]]
assert np.alltrue(np.array(node_vals)==0.0)
###Output
_____no_output_____
###Markdown
Great! Let's take a look at the potential profile to make sure it looks reasonable:
###Code
femcoords = tf_data.coordinates
femdata = tf_data.potential
xgrid, ygrid, zgrid = np.mgrid[0:1, -0.2:0.2:0.005, -0.5:0.5:0.0125]
plot_potential = griddata(femcoords, femdata, (xgrid, ygrid, zgrid), method='linear')
plt.pcolor(ygrid[0],zgrid[0],plot_potential[0])
plt.colorbar()
plt.show()
###Output
_____no_output_____
###Markdown
This looks fine. Let's now look at a line cut:
###Code
xgrid, ygrid, zgrid = np.mgrid[0:1, 0:1, -0.2:0.2:0.002]
potential_cut = griddata(femcoords, femdata, (xgrid, ygrid, zgrid), method='linear')
plt.plot(zgrid[0,0],potential_cut[0,0])
plt.show()
###Output
_____no_output_____
###Markdown
This all looks fine. In the region between 0.0 and 0.1, we have accumulation. Let's make sure this holds up when taking into account the conduction band offset:
###Code
xgrid, ygrid, zgrid = np.mgrid[0:1, 0:1, 0:0.1:0.0005]
potential_cut = griddata(femcoords, femdata, (xgrid, ygrid, zgrid), method='linear')
plt.plot(zgrid[0,0],potential_cut[0,0])
plt.show()
###Output
_____no_output_____
###Markdown
Putting in the proper band offsets, we get:
###Code
zvec = zgrid[0,0]
potential_cut = potential_cut[0,0]
offset_CB = (InSb_CB_level/parse_unit('meV')-ref_level)/1e3
offset_VB = offset_CB-InSb_BG/parse_unit('meV')/1e3
plt.plot(zvec,offset_CB-potential_cut)
plt.plot(zvec,offset_VB-potential_cut)
plt.plot(zvec,np.zeros(zvec.shape))
plt.show()
###Output
_____no_output_____ |
content/homeworks/cs109a_hw7/lab11_MLP_students.ipynb | ###Markdown
CS-109A Introduction to Data Science Lab 11: Neural Network Basics - Introduction to `tf.keras`**Harvard University****Fall 2019****Instructors:** Pavlos Protopapas, Kevin Rader, Chris Tanner**Lab Instructors:** Chris Tanner and Eleni Kaxiras. **Authors:** Eleni Kaxiras, David Sondak, and Pavlos Protopapas.
###Code
## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import pandas as pd
%matplotlib inline
from PIL import Image
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
tf.keras.backend.clear_session() # For easy reset of notebook state.
print(tf.__version__) # You should see a 2.0.0 here!
###Output
2.0.0
###Markdown
Instructions for running `tf.keras` with Tensorflow 2.0: 1. Create a `conda` virtual environment by cloning an existing one that you know works```conda create --name myclone --clone myenv```2. Go to [https://www.tensorflow.org/install/pip](https://www.tensorflow.org/install/pip) and follow instructions for your machine.3. In a nutshell: ```pip install --upgrade pippip install tensorflow==2.0.0 ```All references to Keras should be written as `tf.keras`. For example: ```model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax')])model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) tf.keras.models.Sequentialtf.keras.layers.Dense, tf.keras.layers.Activation, tf.keras.layers.Dropout, tf.keras.layers.Flatten, tf.keras.layers.Reshapetf.keras.optimizers.SGDtf.keras.preprocessing.image.ImageDataGeneratortf.keras.regularizerstf.keras.datasets.mnist ```You could avoid the long names by using```from tensorflow import kerasfrom tensorflow.keras import layers```These imports do not work on some systems, however, because they pick up previous versions of `keras` and `tensorflow`. That is why I avoid them in this lab. Learning GoalsIn this lab we will understand the basics of neural networks and how to start using a deep learning library called `keras`. By the end of this lab, you should:- Understand how a simple neural network works and code some of its functionality from scratch.- Be able to think and do calculations in matrix notation. Also think of vectors and arrays as tensors.- Know how to install and run `tf.keras`.- Implement a simple real world example using a neural network. Part 1: Neural Networks 101 Suppose we have an input vector $X=${$x_1, x_2, ... x_L$} to a $k$-layered network. Each layer has its own number of nodes. For the first layer in our drawing that number is $J$. We can store the weights for each node in a vector $\mathbf{W} \in \mathbb{R}^{JxL+1}$ (accounting for bias). Similarly, we can store the biases from each node in a vector $\mathbf{b} \in \mathbb{R}^{I}$. The affine transformation is then written as $$\mathbf{a} = \mathbf{W^T}X + \mathbf{b}$$ What we then do is "absorb" $\mathbf{b}$ into $X$ by adding a column of ones to $X$. Our $X$ matrix than becomes $\mathbf{X} \in \mathbb{R}^{JxL+1}$ and our equation: $$\mathbf{a} = \mathbf{W^T}_{plusones}X$$ We have that $\mathbf{a} \in \mathbb{R}^{J}$ as well. Next we evaluate the output from each node. We write $$\mathbf{u} = \sigma\left(\mathbf{a}\right)$$ where $\mathbf{u}\in\mathbb{R}^{J}$. We can think of $\sigma$ operating on each individual element of $\mathbf{a}$ separately or in matrix notation. If we denote each component of $\mathbf{a}$ by $a_{j}$ then we can write $$u_{j} = \sigma\left(a_{j}\right), \quad j = 1, ... J.$$ In our code we will implement all these equations in matrix notation.`tf.keras` (Tensorflow) and `numpy` perform the calculations in matrix format. 
*Image source: "Modern Mathematical Methods for Computational Science and Engineering" Efthimios Kaxiras and Athanassios Fokas.* Let's assume that we have 3 input points (L = 3), two hidden layers ($k=2$), and 2 nodes in each layer ($J=2$) Input Layer$𝑋$={$𝑥_1,𝑥_2,x_3$} First Hidden Layer\begin{equation} \begin{aligned}a^{(1)}_1 = w^{(1)}_{10} + w^{(1)}_{11}x_1 + w^{(1)}_{12}x_2 + w^{(1)}_{13}x_3 \\a^{(1)}_2 = w^{(1)}_{20} + w^{(1)}_{21}x_1 + w^{(1)}_{22}x_2 + w^{(1)}_{23}x_3 \\ \end{aligned}\end{equation} All this in matrix notation: $$\mathbf{a} = \mathbf{W^T}X$$ NOTE: in $X$ we have added a column of ones to account for the bias**Then the sigmoid is applied**:\begin{equation} \begin{aligned}u^{(1)}_1 = \sigma(a^{(1)}_1) \\u^{(1)}_2 = \sigma(a^{(1)}_2) \\\end{aligned}\end{equation} or in matrix notation: $$\mathbf{u} = \sigma\left(\mathbf{a}\right)$$ Second Hidden Layer\begin{equation} \begin{aligned}a^{(2)}_1 = w^{(2)}_{10} + w^{(2)}_{11}u^{(1)}_1 + w^{(2)}_{12}u^{(1)}_2 + w^{(2)}_{13}u^{(1)}_3 \\a^{(2)}_2 = w^{(2)}_{20} + w^{(2)}_{21}u^{(1)}_1 + w^{(2)}_{22}u^{(1)}_2 + w^{(2)}_{23}u^{(1)}_3 \\ \end{aligned}\end{equation}**Then the sigmoid is applied**:\begin{equation} \begin{aligned}u^{(2)}_1 = \sigma(a^{(2)}_1) \\u^{(2)}_2 = \sigma(a^{(2)}_2) \\\end{aligned}\end{equation} Output Layer If the output is categorical:For example with four classes ($M=4$): $Y$={$y_1, y_2, y_3, y_4$}, we have the affine and then the sigmoid is lastly applied: \begin{equation} \begin{aligned}a^{(3)}_1 = w^{(3)}_{10} + w^{(3)}_{11}u^{(2)}_1 + w^{(3)}_{12}u^{(2)}_2 \\a^{(3)}_2 = w^{(3)}_{20} + w^{(3)}_{21}u^{(2)}_1 + w^{(3)}_{22}u^{(2)}_2 \\ a^{(3)}_3 = w^{(3)}_{30} + w^{(3)}_{31}u^{(2)}_1 + w^{(3)}_{32}u^{(2)}_2 \\a^{(3)}_4 = w^{(3)}_{40} + w^{(3)}_{41}u^{(2)}_1 + w^{(3)}_{42}u^{(2)}_2 \\\end{aligned}\end{equation}\begin{equation} \begin{aligned}y_1 = \sigma(a^{(3)}_1) \\y_2 = \sigma(a^{(3)}_2) \\y_3 = \sigma(a^{(3)}_3) \\y_3 = \sigma(a^{(3)}_4) \\\end{aligned}\end{equation}$\sigma$ will be softmax in the case of multiple classes and sigmoid for binary. If the output is a number (regression):We have a single y as output:\begin{equation} \begin{aligned}y = w^{(3)}_{10}+ w^{(3)}_{11}u^{(2)}_1 + w^{(3)}_{12}u^{(2)}_2 + w^{(3)}_{13}u^{(2)}_3 \\\end{aligned}\end{equation} Matrix Multiplication and constant addition
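###Markdown
As a minimal sketch of the forward pass just described (with random placeholder weights, not values from the lab), for $L=3$ inputs and $J=2$ nodes per hidden layer:
###Code
rng = np.random.default_rng(0)
sig = lambda a: 1.0 / (1.0 + np.exp(-a))     # logistic sigmoid
X = np.array([0.5, -1.2, 3.0])               # L = 3 inputs
W1 = rng.normal(size=(2, 4))                 # J = 2 rows, L + 1 columns (bias absorbed)
u1 = sig(W1 @ np.append(X, 1.0))             # first hidden layer output, shape (2,)
W2 = rng.normal(size=(2, 3))                 # second layer sees u1 plus a bias entry
u2 = sig(W2 @ np.append(u1, 1.0))            # second hidden layer output, shape (2,)
y = rng.normal(size=3) @ np.append(u2, 1.0)  # single regression output
print(y)
###Output
_____no_output_____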
###Code
# make two arrays and multiply them using np.dot(a, b) or tf.matmul(a, b)
# both Tensorflow and numpy take care of transposing.
a = np.random.rand(2, 3); b = np.random.rand(3, 4)
print(np.dot(a, b).shape, tf.matmul(a, b).shape)  # both give (2, 4)
# how do we add the constant in the matrix? absorb the bias as a column of ones:
a_ones = np.hstack([a, np.ones((2, 1))])
###Output
_____no_output_____
###Markdown
In class exercise : Plot the sigmoidDefine the `sigmoid` and the `tanh`. For `tanh` you may use `np.tanh` and for the `sigmoid` use the general equation:\begin{align}\sigma = \dfrac{1}{1+e^{-2(x-c)/a}} \qquad\text{(1.1)}\textrm{}\end{align}Generate a list of 500 $x$ points from -5 to 5 and plot both functions. What do you observe? What do variables $c$ and $a$ do?
###Code
# your code here
# %load solutions/sigmoid.py
# The smaller the `a`, the sharper the function is.
# Variable `c` moves the function along the x axis
def sigmoid(x,c,a):
z = ((x-c)/a)
return 1.0 / (1.0 + np.exp(-z))
x = np.linspace(-5.0, 5.0, 500) # input points
c = 1.
a = 0.5
plt.plot(x, sigmoid(x, c, a), label='sigmoid')
plt.plot(x, np.tanh(x), label='tanh')
plt.grid();
plt.legend();
###Output
_____no_output_____
###Markdown
2. In class exercise: Approximate a Gaussian function using a node and manually adjusting the weights. Start with one layer with one node and move to two nodes. The task is to approximate (learn) a function $f\left(x\right)$ given some input $x$. For demonstration purposes, the function we will try to learn is a Gaussian function: \begin{align}f\left(x\right) = e^{-x^{2}}\textrm{}\end{align}Even though we represent the input $x$ as a vector on the computer, you should think of it as a single input. 2.1 Start by plotting the above function using the $x$ dataset
###Code
x = np.linspace(-5.0, 5.0, 500) # input points
def gaussian(x):
return np.exp(-x*x)
f = gaussian(x)
plt.plot(x, f, label='gaussian')
plt.legend()
f.shape
###Output
_____no_output_____
###Markdown
2.2 Now, let's code the single node as per the image above. Write a function named `affine` that does the transformation. The definition is provided below. Then create a simpler sigmoid with just one variable. We choose a **sigmoid** activation function and specifically the **logistic** function. Sigmoids are a family of functions and the logistic function is just one member in that family. $$\sigma\left(z\right) = \dfrac{1}{1 + e^{-z}}.$$ Define both functions in code.
###Code
def affine(x, w, b):
"""Return affine transformation of x
INPUTS
======
x: A numpy array of points in x
w: An array representing the weight of the perceptron
b: An array representing the biases of the perceptron
RETURN
======
z: A numpy array of points after the affine transformation
z = wx + b
"""
# Code goes here
return z
# your code here
# %load solutions/affine-sigmoid.py
def affine(x, w, b):
return w * x + b
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
###Output
_____no_output_____
###Markdown
And now we plot the activation function and the true function. What do you think will happen if you change $w$ and $b$?
###Code
w = [-1.0, 2.0, 5.0]  # Create a list of weights to try (example values)
b = [0.0, 1.0, -2.0]  # Create a list of biases (example values)
fig, ax = plt.subplots(1,1, figsize=(9,5))
SIZE = 16
# plot our true function, the gaussian
ax.plot(x, f, lw=4, ls='-.', label='True function')
# plot 3 "networks"
for wi, bi in zip(w, b):
h = sigmoid(affine(x, wi, bi))
ax.plot(x, h, lw=4, label=r'$w = {0}$, $b = {1}$'.format(wi,bi))
ax.set_title('Single neuron network', fontsize=SIZE)
# Create labels (very important!)
ax.set_xlabel('$x$', fontsize=SIZE) # Notice we make the labels big enough to read
ax.set_ylabel('$y$', fontsize=SIZE)
ax.tick_params(labelsize=SIZE) # Make the tick labels big enough to read
ax.legend(fontsize=SIZE, loc='best') # Create a legend and make it big enough to read
###Output
_____no_output_____
###Markdown
We didn't do an exhaustive search of the weights and biases, but it sure looks like this single perceptron is never going to match the actual function. Again, we shouldn't be surprised about this. The output layer of the network is simply the logistic function, which can only have so much flexibility. Let's try to make our network more flexible by using **more nodes**! Multiple Perceptrons in a Single Layer: It appears that a single neuron is somewhat limited in what it can accomplish. What if we expand the number of nodes/neurons in our network? We have two obvious choices here. One option is to add depth to the network by putting layers next to each other. The other option is to stack neurons on top of each other in the same layer. Now the network has some width, but is still only one layer deep.
###Code
x = np.linspace(-5.0, 5.0, 500) # input points
f = np.exp(-x*x) # data
w = np.array([2.5, -2.5])  # ENTER TWO VALUES HERE (example values)
b = np.array([2.5, 2.5])   # HERE TOO (example values)
# Affine transformations
z1 = w[0] * x + b[0]
z2 = w[1] * x + b[1]
# Node outputs
h1 = sigmoid(z1)
h2 = sigmoid(z2)
###Output
_____no_output_____
###Markdown
Now let's plot things and see what they look like.
###Code
fig, ax = plt.subplots(1,1, figsize=(9,5))
ax.plot(x, f, lw=4, ls = '-.', label='True function')
ax.plot(x, h1, lw=4, label='First neuron')
ax.plot(x, h2, lw=4, label='Second neuron')
# Set title
ax.set_title('Comparison of Neuron Outputs', fontsize=SIZE)
# Create labels (very important!)
ax.set_xlabel('$x$', fontsize=SIZE) # Notice we make the labels big enough to read
ax.set_ylabel('$y$', fontsize=SIZE)
ax.tick_params(labelsize=SIZE) # Make the tick labels big enough to read
ax.legend(fontsize=SIZE, loc='best') # Create a legend and make it big enough to read
###Output
_____no_output_____
###Markdown
Just as we expected. Some sigmoids. Of course, to get the network prediction we must combine these two sigmoid curves somehow. First we'll just add $h_{1}$ and $h_{2}$ without any weights to see what happens. NoteWe are **not** doing classification here. We are trying to predict an actual function. The sigmoid activation is convenient when doing classification because you need to go from $0$ to $1$. However, when learning a function, we don't have as good of a reason to choose a sigmoid.
###Code
# Network output
wout = np.ones(2) # Set the output weights to unity to begin
bout = -1 # bias
yout = wout[0] * h1 + wout[1] * h2 + bout
###Output
_____no_output_____
###Markdown
And plot.
###Code
fig, ax = plt.subplots(1,1, figsize=(9,5))
ax.plot(x, f, ls='-.', lw=4, label=r'True function')
ax.plot(x, yout, lw=4, label=r'$y_{out} = h_{1} + h_{2}$')
# Create labels (very important!)
ax.set_xlabel('$x$', fontsize=SIZE) # Notice we make the labels big enough to read
ax.set_ylabel('$y$', fontsize=SIZE)
ax.tick_params(labelsize=SIZE) # Make the tick labels big enough to read
ax.legend(fontsize=SIZE, loc='best') # Create a legend and make it big enough to read
###Output
_____no_output_____
###Markdown
Very cool! The two nodes interact with each other to produce a pretty complicated-looking function. It still doesn't match the true function, but now we have some hope. In fact, it's starting to look a little bit like a Gaussian! We can do better. There are three obvious options at this point: 1. Change the number of nodes 2. Change the activation functions 3. Change the weights. We will leave this simple example for some other time! Let's move on to fashion items! Part 2: Tensors, Fashion, and Reese Witherspoon. We can think of tensors as multidimensional arrays of real numerical values; their job is to generalize matrices to multiple dimensions. While tensors first emerged in the 20th century, they have since been applied to numerous other disciplines, including machine learning. Tensor decomposition/factorization can solve, among others, problems in unsupervised learning settings and with temporal and multirelational data. For those of you who will get to handle images for Convolutional Neural Networks, it's a good idea to have an understanding of tensors of rank 3. We will use the following naming conventions:- scalar = just a number = rank 0 tensor ($a$ ∈ $F$,)- vector = 1D array = rank 1 tensor ( $x = (\;x_1,...,x_i\;)⊤$ ∈ $F^n$ )- matrix = 2D array = rank 2 tensor ( $\textbf{X} = [a_{ij}] ∈ F^{m×n}$ )- 3D array = rank 3 tensor ( $\mathscr{X} =[t_{i,j,k}]∈F^{m×n×l}$ )- $\mathscr{N}$D array = rank $\mathscr{N}$ tensor ( $\mathscr{T} =[t_{i1},...,t_{i\mathscr{N}}]∈F^{n_1×...×n_\mathscr{N}}$ ) <-- Things start to get complicated here... Tensor indexing: We can create subarrays by fixing some of the given tensor's indices. We can create a vector by fixing all but one index. A 2D matrix is created when fixing all but two indices. For example, for a third order tensor the vectors are $\mathscr{X}[:,j,k]$ = $\mathscr{X}[j,k]$ (column), $\mathscr{X}[i,:,k]$ = $\mathscr{X}[i,k]$ (row), and $\mathscr{X}[i,j,:]$ = $\mathscr{X}[i,j]$ (tube). Tensor multiplication: We can multiply one matrix with another as long as the sizes are compatible ((n × m) × (m × p) = n × p), and we can also multiply an entire matrix by a constant. Numpy's `numpy.dot` performs a matrix multiplication which is straightforward when we have 2D or 1D arrays. But what about > 3D arrays? The function will choose according to the matching dimensions, but if we want to choose ourselves we should use `tensordot`; again, we **do not need tensordot** for this class. Reese Witherspoon: This image is from the dataset [Labeled Faces in the Wild](http://vis-www.cs.umass.edu/lfw/person/Reese_Witherspoon.html) used for machine learning training. Images are 24-bit RGB images (height, width, channels) with 8 bits for each of the R, G, B channels. Explore and print the array.
###Code
# load and show the image
FILE = '../fig/Reese_Witherspoon.jpg'
#img = np.array(Image.open(FILE)) # this works as well
img = mpimg.imread(FILE)
imgplot = plt.imshow(img)
print(f'The image is a: {type(img)} of shape {img.shape}')
img[3:5, 3:5, :]
###Output
The image is a: <class 'numpy.ndarray'> of shape (150, 150, 3)
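###Markdown
A quick toy illustration of the indexing and multiplication rules described above (arrays made up for the example, not taken from the image):
###Code
T = np.arange(24).reshape(2, 3, 4)  # a rank 3 tensor
print(T[0, 1, :])                   # fix all but one index -> a vector (a "tube")
print(T[0, :, :].shape)             # fix one index -> a (3, 4) matrix
A = np.ones((2, 3))
B = np.ones((3, 4))
print(np.dot(A, B).shape)           # (n x m) x (m x p) -> (2, 4)
print(2 * A)                        # multiplying an entire matrix by a constant
###Output
_____no_output_____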
###Markdown
Slicing tensors: slice along each axis
###Code
# we want to show each color channel
fig, axes = plt.subplots(1, 3, figsize=(10,10))
for i, subplot in zip(range(3), axes):
temp = np.zeros(img.shape, dtype='uint8')
temp[:,:,i] = img[:,:,i]
subplot.imshow(temp)
subplot.set_axis_off()
plt.show()
###Output
_____no_output_____
###Markdown
Multiplying Images with a scalar (just for fun, does not really help us in any way)
###Code
temp = img
temp = temp * 2
plt.imshow(temp)
###Output
_____no_output_____
###Markdown
For more on image manipulation by `matplotlib` see: [matplotlib-images](https://matplotlib.org/3.1.1/tutorials/introductory/images.html) Anatomy of an Artificial Neural NetworkIn Part 1 we hand-made a neural network by writing some simple python functions. We focused on a regression problem where we tried to learn a function. We practiced using the logistic activation function in a network with multiple nodes, but a single or two hidden layers. Some of the key observations were:* Increasing the number of nodes allows us to represent more complicated functions * The weights and biases have a very big impact on the solution* Finding the "correct" weights and biases is really hard to do manually* There must be a better method for determining the weights and biases automaticallyWe also didn't assess the effects of different activation functions or different network depths. https://www.tensorflow.org/guide/keras`tf.keras` is TensorFlow's high-level API for building and training deep learning models. It's used for fast prototyping, state-of-the-art research, and production. `Keras` is a library created by François Chollet. After Google released Tensorflow 2.0, the creators of `keras` recommend that "Keras users who use multi-backend Keras with the TensorFlow backend switch to `tf.keras` in TensorFlow 2.0. `tf.keras` is better maintained and has better integration with TensorFlow features". IMPORTANT: In `Keras` everything starts with a Tensor of N samples as input and ends with a Tensor of N samples as output. The 3 parts of an ANN- **Part 1: the input layer** (our dataset)- **Part 2: the internal architecture or hidden layers** (the number of layers, the activation functions, the learnable parameters and other hyperparameters)- **Part 3: the output layer** (what we want from the network)In the rest of the lab we will practice with end-to-end neural network training1. Load the data 2. Define the layers of the model.3. Compile the model.4. Fit the model to the train set (also using a validation set).5. Evaluate the model on the test set.6. Plot metrics such as accuracy.7. Predict on random images from test set.8. Predict on a random image from the web!
###Code
seed = 7
np.random.seed(seed)
###Output
_____no_output_____
###Markdown
Fashion MNIST. MNIST, the set of handwritten digits, is considered the Drosophila of Machine Learning. It has been overused, though, so we will try a slight modification to it. **Fashion-MNIST** is a dataset of clothing article images (created by [Zalando](https://github.com/zalandoresearch/fashion-mnist)), consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a **28 x 28** grayscale image, associated with a label from **10 classes**. The creators intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits. Each pixel is 8 bits so its value ranges from 0 to 255. Let's load and look at it! 1. Load the data
###Code
%%time
# get the data from keras
# load the data split in train and test! how nice!
fashion_mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# normalize the data by dividing with pixel intensity
# (each pixel is 8 bits so its value ranges from 0 to 255)
x_train, x_test = x_train / 255.0, x_test / 255.0
# classes are named 0-9 so define names for plotting clarity
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# plot
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x_train[i], cmap=plt.cm.binary)
plt.xlabel(class_names[y_train[i]])
plt.show()
plt.imshow(x_train[3], cmap=plt.cm.binary)
x_train.shape, x_test.shape
y_train.shape
###Output
_____no_output_____
###Markdown
2. Define the layers of the model. 3. Compile the model
###Code
# a simple fully-connected architecture (one reasonable choice, not the only one)
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
tf.keras.utils.plot_model(
model,
#to_file='model.png', # if you want to save the image
show_shapes=True, # True for more details than you need
show_layer_names=True,
rankdir='TB',
expand_nested=False,
dpi=96
)
###Output
_____no_output_____
###Markdown
[Everything you wanted to know about a Keras Model and were afraid to ask](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 4. Fit the model to the train set (also using a validation set)This is the part that takes the longest.-----------------------------------------------------------**ep·och** noun: epoch; plural noun: epochs. A period of time in history or a person's life, typically one marked by notable events or particular characteristics. Examples: "the Victorian epoch", "my Neural Netwok's epochs". -----------------------------------------------------------
###Code
%%time
# the core of the network training (epochs, batch size and validation split are example settings)
history = model.fit(x_train, y_train, validation_split=0.1, epochs=10, batch_size=64)
###Output
_____no_output_____
###Markdown
Save the model. You can save the model so you do not have to `.fit` every time you reset the kernel in the notebook. Network training is expensive! For more details on this see [https://www.tensorflow.org/guide/keras/save_and_serialize](https://www.tensorflow.org/guide/keras/save_and_serialize)
###Code
# save the model so you do not have to run the code everytime
model.save('fashion_model.h5')
# Recreate the exact same model purely from the file
#model = tf.keras.models.load_model('fashion_model.h5')
###Output
_____no_output_____
###Markdown
5. Evaluate the model on the test set.
###Code
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f'Test accuracy={test_accuracy}')
###Output
Test accuracy=0.8866999745368958
###Markdown
6. We learn a lot by studying History! Plot metrics such as accuracy. You can learn a lot about neural networks by observing how they perform while training. You can issue `callbacks` in `keras`. The network's performance is stored in a `keras` callback aptly named `history`, which can be plotted.
###Code
print(history.history.keys())
# plot accuracy and loss for the test set
fig, ax = plt.subplots(1,2, figsize=(20,6))
ax[0].plot(history.history['accuracy'])
ax[0].plot(history.history['val_accuracy'])
ax[0].set_title('Model accuracy')
ax[0].set_ylabel('accuracy')
ax[0].set_xlabel('epoch')
ax[0].legend(['train', 'val'], loc='best')
ax[1].plot(history.history['loss'])
ax[1].plot(history.history['val_loss'])
ax[1].set_title('Model loss')
ax[1].set_ylabel('loss')
ax[1].set_xlabel('epoch')
ax[1].legend(['train', 'val'], loc='best')
###Output
_____no_output_____
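###Markdown
As an aside (a sketch, not used in this lab's training run): callbacks can also act during training. For example, `EarlyStopping` halts fitting once the validation loss stops improving:
###Code
# assumes the same model and data as above; the patience value is arbitrary
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2,
                                              restore_best_weights=True)
# history = model.fit(x_train, y_train, validation_split=0.1,
#                     epochs=50, callbacks=[early_stop])
###Output
_____no_output_____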
###Markdown
7. Now let's use the Network for what it was meant to do: Predict!
###Code
predictions = model.predict(x_test)
predictions[0]
np.argmax(predictions[0]), class_names[np.argmax(predictions[0])]
###Output
_____no_output_____
###Markdown
Let's see if our network predicted right! Is the first item what was predicted?
###Code
plt.figure()
plt.imshow(x_test[0], cmap=plt.cm.binary)
plt.xlabel(class_names[y_test[0]])
plt.colorbar()
###Output
_____no_output_____
###Markdown
**Correct!!** Now let's see how confident our model is by plotting the probability values:
###Code
# code source: https://www.tensorflow.org/tutorials/keras/classification
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 406
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], y_test, x_test)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], y_test)
plt.show()
###Output
_____no_output_____
###Markdown
8. Predicting in the real world. Let's see if our network can generalize beyond the MNIST fashion dataset. Let's give it a random googled image of a boot. Does it have to be a clothing item resembling the MNIST fashion dataset? Can it be a puppy?**Download an image from the internet and resize it to 28x28.**
###Code
# your code here.
# Resize it to 28 x 28 and one channel (you could do this outside the notebook)
# 'boot.png' is a placeholder filename for whatever image you downloaded
img = Image.open('boot.png').convert('L').resize((28, 28))
# make into one channel and see .shape
img = np.array(img) / 255.0; print(img.shape)
###Output
_____no_output_____
###Markdown
`tf.keras` models are optimized to make predictions on a batch, or collection, of examples at once. Accordingly, even though you're using a single image, you need to add it to a list:
###Code
# your code here
# Add the image to a batch where it's the only member.
img_batch = np.expand_dims(img, 0)
# your code here
# print the prediction
pred = model.predict(img_batch)
print(class_names[np.argmax(pred[0])])
###Output
_____no_output_____ |
presidio_evaluator/data_generator/helper notebooks/NER_dataset to synth.ipynb | ###Markdown
Generate new examples based on this dataset: https://www.kaggle.com/abhinavwalia95/entity-annotated-corpus. This notebook takes the NER dataset from the previous link and creates templates (utterances with placeholders) for a PII synthetic data generator to use in order to create new sentences. Note that due to the nature of the tagging, there might be weird output sentences. For example:- The same entity appears multiple times in a sentence: "I travel from Argentina to Argentina"- Bad grammar due to the lack of inflection and changes to nouns due to context: "*The statement said no Denmark or India-led troops were killed*" instead of "*The statement said no Danish or Indian led troops were killed*"- Unrealistic sentences due to change in entities: "Prime minister Lebron James enters the government building in Kuala Lumpur"The notebook additionally introduces two new entities, TITLE and ROLE, in order to overcome cases like "UK David Scott called his wife", where the original sentence is "UK Prime Minister Boris Johnson called his wife", as "Prime Minister" was originally tagged as PER in the original dataset. The same logic goes for titles, like Mr., Mrs., Ms.
###Code
import pandas as pd
#First, Download ner.csv from https://www.kaggle.com/abhinavwalia95/entity-annotated-corpus
ner_dataset = pd.read_csv("ner.csv",encoding = "ISO-8859-1", error_bad_lines=False)
ner_dataset.columns
len(ner_dataset)
ner_dataset = ner_dataset.drop_duplicates()
len(ner_dataset)
###Output
_____no_output_____
###Markdown
Example sentence:
###Code
ner_dataset[ner_dataset['sentence_idx']==13][['sentence_idx','word','tag','prev-word','prev-prev-word','next-word']]
###Output
_____no_output_____
###Markdown
New entities - Title and Role- **Title**: Mr., Mrs., Professor, Doctor, ...- **Role**: President, Secretary General, U.N. Secretary, ... Quick exploratory analysis of frequencies:- First PER token- Second PER token- First and second PER token- One before and first tokens of PER
###Code
# Evaluate words before I-per
bper = ner_dataset[ner_dataset['tag']=='B-per']
bper_tokens = bper['word']
prev_bper_token = bper['prev-word']
next_bper_token = bper['next-word']
two_prev_tokens = zip(prev_bper_token, bper_tokens)
two_next_tokens = zip(bper_tokens, next_bper_token)
from collections import Counter
print("20 most common PER token frequencies:")
Counter(bper_tokens).most_common(20)
print("20 most common previous and first PER token frequencies:")
Counter(two_prev_tokens).most_common(20)
print("20 most common first and second PER token frequencies:")
Counter(two_next_tokens).most_common(20)
# Lists of titles and roles to update as ttl, rol
TITLES = ['Mr.','Ms.','Mrs.']
ROLES = ['President','General','Senator','Secretary-General','Minister']
BIGRAMS_ROLES = [('Prime','Minister'),('prime','minister'),('U.S.','President'),
('Venezuelan', 'President'),('Vice','President'), ('Foreign', 'Minister'),
('U.S.','Secretary'),('U.N.','Secretary'),('Defence','Secretary')]
# Update title and per for most common cases
def fix_bigram_title(df, row,index,first='Prime',second='Minister',tag='ttl'):
if row['word'] == first and row['next-word'] == second and 'per' in row['tag']:
df.loc[index,'tag'] = 'B-{}'.format(tag)
elif row['word'] == second and row['prev-word'] == first and 'per' in row['tag']:
df.loc[index,'tag'] = 'I-{}'.format(tag)
elif row['tag']== 'I-per' and row['prev-word'] == second and 'per' in row['tag']:
df.loc[index,'tag'] = 'B-per'
def fix_unigram_title(df, prev_row,prev_index, row , index, title='President',tag='ttl'):
#print(row)
if prev_row['word'] == title and prev_row['tag'] == 'B-per' and row['tag']=='I-per':
df.loc[prev_index,'tag']='B-{}'.format(tag)
df.loc[index,'tag'] = 'B-per'
prev_row = None
prev_index = None
for index, row in ner_dataset.iterrows():
# Handle 'Prime Minister'
for bigram in BIGRAMS_ROLES:
fix_bigram_title(ner_dataset,row,index,bigram[0],bigram[1],'rol')
if prev_row is not None:
for title in TITLES:
fix_unigram_title(df=ner_dataset,prev_row=prev_row,prev_index=prev_index,row=row,index=index,title=title,tag='ttl')
for role in ROLES:
fix_unigram_title(ner_dataset,prev_row,prev_index,row,index,role,'rol')
prev_row = row
prev_index = index
ner_dataset[ner_dataset['sentence_idx']==13][['sentence_idx','word','tag','prev-word','prev-prev-word','next-word']]
# keep only relevant columns
dataset = ner_dataset[['sentence_idx','word','tag']]
dataset.to_csv("../../../datasets/ner_with_titles.csv",encoding = "ISO-8859-1")
###Output
_____no_output_____
###Markdown
Create templates based on the NER dataset
###Code
import re
class SentenceGetter(object):
def __init__(self, dataset):
self.n_sent = 1
self.dataset = dataset
self.empty = False
agg_func = lambda s: [(w, t) for w,t in zip(s["word"].values.tolist(),
s["tag"].values.tolist())]
self.grouped = self.dataset.groupby("sentence_idx").apply(agg_func)
self.sentences = [s for s in self.grouped]
def get_next(self):
try:
s = self.grouped["Sentence: {}".format(self.n_sent)]
self.n_sent += 1
return s
except:
return None
@staticmethod
def get_template(grouped,entity_name_replace_dict=None):
TAGS_TO_IGNORE = ['nat','eve','art','tim']
template = ""
i=0
cur_index = 0
ents = []
for token in grouped:
token_text = token[0].replace("[", "").replace("]","")
token_tag = token[1]
if token_tag == 'O':
template += " " + token_text
elif 'B-' in token_tag and token_tag[2:] not in TAGS_TO_IGNORE:
if entity_name_replace_dict:
ent = entity_name_replace_dict[token[1][2:]]
else:
ent = token_tag[2:]
ents.append(ent)
template += " [" + ent + "]"
template = re.sub(r'\s([?,\':.!"](?:|$))+', r'\1', template)
for ent in ents:
weird = "[{}] [{}]".format(ent,ent)
template = template.replace(weird,"[{}]".format(ent))
#remove additional weird combinations:
to_replace = {
"[COUNTRY] [ROLE] [PERSON]": "[ROLE] [PERSON]",
"[COUNTRY] [ROLE]" : "[ROLE]",
"[ORGANIZATION] [ROLE] [PERSON]" : "[ORGANIZATION]'s [ROLE] [PERSON]",
"[COUNTRY] [LOCATION]" : "[LOCATION]",
"[LOCATION] [COUNTRY]": "[LOCATION]",
"[PERSON] [COUNTRY]" : "[PERSON]",
"[PERSON] [LOCATION]" : "[PERSON]",
"[COUNTRY] [PERSON]" : "[PERSON]",
"[LOCATION] [PERSON]" : "[PERSON]",
"The [ORGANIZATION]" : "[ORGANIZATION]",
"[PERSON] [ORGANIZATION]" : "[PERSON]",
"of [ORGANIZATION] [PERSON]" : "of [ORGANIZATION], [PERSON]",
"[ORGANIZATION] [PERSON]" : "[PERSON]",
"[PERSON] [PERSON]": "[PERSON]",
"[LOCATION] says" : "[PERSON] says",
"[LOCATION] said" : "[PERSON] said"
}
for weird in to_replace.keys():
template = template.replace(weird,to_replace[weird])
return template.strip()
getter = SentenceGetter(dataset)
ENTITIES_DICTIONARY = {"per":"PERSON","gpe":"COUNTRY","geo":"LOCATION","org":"ORGANIZATION",'ttl':'TITLE','rol':'ROLE'}
sentences = getter.sentences
print("original:",sentences[12])
print("template:", getter.get_template(sentences[12],entity_name_replace_dict=ENTITIES_DICTIONARY))
new_templates = [SentenceGetter.get_template(sentence, ENTITIES_DICTIONARY) for sentence in sentences]
new_templates[:5]
# save to file
with open("../../presidio_evaluator/data_generator/raw_data/new_templates2.txt","w+", encoding = "ISO-8859-1") as f:
for template in new_templates:
f.write("%s\n" % template)
###Output
_____no_output_____
StageA-Quizcode.ipynb | ###Markdown
Rajeshwari Mishra, Stage A Quiz Code, questions 11-20
###Code
#importing libraries
import numpy as np
import pandas as pd
#csv dataset
df = pd.read_csv('C:\\Users\\neha2\\Downloads\\FoodBalanceSheets_E_Africa_NOFLAG.csv', encoding = 'latin-1')
print(df)
#Question 11
#Answer the following questions based on the African food production dataset from the FAO website, already provided
#What is the total sum of Animal Fat produced in 2014 and 2017 respectively?
df.groupby('Item').sum()
df[['Item', 'Y2014', 'Y2017']].groupby(['Item']).sum().loc['Animal fats']
#Question 12
#What is the mean and standard deviation across the whole dataset for the year 2015 to 3 decimal places?
print('%.3f'%df['Y2015'].mean())
print('\n')
print('%.3f'%df['Y2015'].std())
#Question 13
#What is the total number and percentage of missing data in 2016 to 2 decimal places?
df['Y2016'].isnull().sum()
(df['Y2016'].isnull().sum()/len(df['Y2016']))*100
#Question 14
#Which year had the highest correlation with ‘Element Code’?
df.corr()
#Question 15
#What year has the highest sum of Import Quantity?
#Hint- Perform a groupby operation on ‘Element’ and use the resulting Dataframe to answer the question
df.groupby('Element').sum()
#Question 16
#What is the total number of the sum of Production in 2014?
#Hint- Perform a groupby operation on ‘Element’ and use the resulting Dataframe to answer the question
production = df.groupby('Element')['Y2014'].sum()
print(production['Production'])
#Question 17
#Which of these elements had the highest sum in 2018?
#Hint- Select columns ‘Y2018’ and ‘Element’, Perform a groupby operation on ‘Element’ on the selected dataframe and answer the question.
df[['Element','Y2018']].groupby('Element').sum().sort_values('Y2018', ascending=True)
#Question 18
#Which of these elements had the 3rd lowest sum in 2018?
#Hint- Select columns ‘Y2018’ and ‘Element’, Perform a groupby operation on ‘Element’ on the selected dataframe and answer the question.
df[['Element','Y2018']].groupby('Element').sum().sort_values('Y2018', ascending=True)
#Question 19
#What is the total Import Quantity in Algeria in 2018?
df.groupby(['Area', 'Element']).sum().loc['Algeria', 'Import Quantity']['Y2018']
#Question 20
#What is the total number of unique countries in the dataset?
len(pd.unique(df['Area']))
###Output
_____no_output_____ |
notebooks/Part_1_Data_Preparation.ipynb | ###Markdown
Part 1: Data Preparation. Please make sure you have the __notebook__ and __nltk__ Python packages installed in the compute context you choose as kernel. For demonstration purposes, this series of notebooks uses the `local` compute context.**NOTE**: the Python 3 kernel doesn't include Azure Machine Learning Workbench functionalities. Please switch the kernel to `local` before continuing further. To install __notebook__ and __nltk__, please uncomment and run the following script.
###Code
# !pip install --upgrade notebook
# !pip install --upgrade nltk
###Output
_____no_output_____
###Markdown
Import Required Python Modules
###Code
import pandas as pd
import numpy as np
import re, os, gzip, requests, warnings
from azureml.logging import get_azureml_logger
warnings.filterwarnings("ignore")
run_logger = get_azureml_logger()
run_logger.log('amlrealworld.QnA-matching.part1-data-preparation','true')
###Output
_____no_output_____
###Markdown
Access Sample Data. In this example, we have collected a set of Q&A pairs from the Stack Overflow site tagged as `JavaScript` questions. The data contains 1,201 original Q&A pairs as well as many duplicate questions, i.e. new questions that Stack Overflow users have linked back to pre-existing Q&A pairs that effectively provide answers to these new questions. The data schema of the original questions (Q), duplicate questions (D), and answers (A) can be found in the following table:
| Dataset | Field | Type | Description |
|---------|-------|------|-------------|
| question (Q) | Id | String | The unique question ID (primary key) |
| | AnswerId | String | The unique answer ID per question |
| | Text0 | String | The raw text data including the question's title and body |
| | CreationDate | Timestamp | The timestamp of when the question has been asked |
| dupes (D) | Id | String | The unique duplication ID (primary key) |
| | AnswerId | String | The answer ID associated with the duplication |
| | Text0 | String | The raw text data including the duplication's title and body |
| | CreationDate | Timestamp | The timestamp of when the duplication has been asked |
| answers (A) | Id | String | The unique answer ID (primary key) |
| | text0 | String | The raw text data of the answer |
The datasets are compressed and stored in Azure Blob storage as `.tsv.gz` files, and this section provides the code to retrieve the data in the notebook.
###Code
# load raw data from a .tsv.gz file into Pandas data frame.
def read_csv_gz(url, **kwargs):
df = pd.read_csv(gzip.open(requests.get(url, stream=True).raw, mode='rb'), sep='\t', encoding='utf8', **kwargs)
return df.set_index('Id')
# URLs to Original questions, Duplications, and Answers.
questions_url = 'https://bostondata.blob.core.windows.net/stackoverflow/orig-q.tsv.gz'
dupes_url = 'https://bostondata.blob.core.windows.net/stackoverflow/dup-q.tsv.gz'
answers_url = 'https://bostondata.blob.core.windows.net/stackoverflow/ans.tsv.gz'
# load datasets.
questions = read_csv_gz(questions_url, names=('Id', 'AnswerId', 'Text0', 'CreationDate'))
dupes = read_csv_gz(dupes_url, names=('Id', 'AnswerId', 'Text0', 'CreationDate'))
answers = read_csv_gz(answers_url, names=('Id', 'Text0'))
###Output
_____no_output_____
###Markdown
To provide some example, here are the first five rows of the __questions__ table:
###Code
questions.head(5)
###Output
_____no_output_____
###Markdown
Here is the full text of one __original__ question, whose `Id` is `220231`. The `AnswerId` associated with this question is `220233`.
###Code
# This text include the HTML code.
print(questions["Text0"][220231])
###Output
Accessing the web page's HTTP Headers in JavaScript. <p>How do I access a page's HTTP response headers via JavaScript?</p> <p>Related to <a href="http://stackoverflow.com/questions/220149/how-do-i-access-the-http-request-header-fields-via-javascript"><strong>this question</strong></a>, which was modified to ask about accessing two specific HTTP headers.</p> <blockquote> <p><strong>Related:</strong><br> <a href="http://stackoverflow.com/questions/220149/how-do-i-access-the-http-request-header-fields-via-javascript">How do I access the HTTP request header fields via JavaScript?</a></p> </blockquote>
###Markdown
Here is the full text of the __answer__ associated with the above original question:
###Code
print(answers["Text0"][220233])
###Output
<p>Unfortunately, there isn't an API to give you the HTTP response headers for your initial page request. That was the original question posted here. It has been <a href="http://stackoverflow.com/questions/12258705/how-can-i-read-the-current-headers-without-making-a-new-request-with-js">repeatedly asked</a>, too, because some people would like to get the actual response headers of the original page request without issuing another one.</p> <h1><br/>For AJAX Requests:</h1> <p>If an HTTP request is made over AJAX, it is possible to get the response headers with the <strong><code>getAllResponseHeaders()</code></strong> method. It's part of the XMLHttpRequest API. To see how this can be applied, check out the <em><code>fetchSimilarHeaders()</code></em> function below. Note that this is a work-around to the problem that won't be reliable for some applications.</p> <pre><code>myXMLHttpRequest.getAllResponseHeaders(); </code></pre> <ul> <li><p>The API was specified in the following candidate recommendation for XMLHttpRequest: <a href="http://www.w3.org/TR/XMLHttpRequest/#the-getresponseheader-method" rel="nofollow">XMLHttpRequest - W3C Candidate Recommendation 3 August 2010</a></p></li> <li><p>Specifically, the <code>getAllResponseHeaders()</code> method was specified in the following section: <a href="http://www.w3.org/TR/XMLHttpRequest/#the-getallresponseheaders()-method" rel="nofollow">w3.org: <code>XMLHttpRequest</code>: the <code>getallresponseheaders()</code> method</a> </p></li> <li><p>The MDN documentation is good, too: <a href="https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest" rel="nofollow">developer.mozilla.org: <code>XMLHttpRequest</code></a>.</p></li> </ul> <p>This will not give you information about the original page request's HTTP response headers, but it could be used to make educated guesses about what those headers were. More on that is described next.</p> <h1><br/>Getting header values from the Initial Page Request:</h1> <p>This question was first asked several years ago, asking specifically about how to get at the original HTTP response headers for the <em>current page</em> (i.e. the same page inside of which the javascript was running). This is quite a different question than simply getting the response headers for any HTTP request. For the initial page request, the headers aren't readily available to javascript. Whether the header values you need will be reliably and sufficiently consistent if you request the same page again via AJAX will depend on your particular application.</p> <p>The following are a few suggestions for getting around that problem.</p> <h2><br/>1. Requests on Resources which are largely static</h2> <p>If the response is largely static and the headers are not expected to change much between requests, you could make an AJAX request for the same page you're currently on and assume that they're they are the same values which were part of the page's HTTP response. This could allow you to access the headers you need using the nice XMLHttpRequest API described above.</p> <pre><code>function fetchSimilarHeaders (callback) { var request = new XMLHttpRequest(); request.onreadystatechange = function () { if (request.readyState === 4) { // // The following headers may often be similar // to those of the original page request... 
// if (callback && typeof callback === 'function') { callback(request.getAllResponseHeaders()); } } }; // // Re-request the same page (document.location) // We hope to get the same or similar response headers to those which // came with the current page, but we have no guarantee. // Since we are only after the headers, a HEAD request may be sufficient. // request.open('HEAD', document.location, true); request.send(null); } </code></pre> <p>This approach will be problematic if you truly have to rely on the values being consistent between requests, since you can't fully guarantee that they are the same. It's going to depend on your specific application and
###Markdown
__Duplicate__ questions share the same `AnswerId` as the original question they link to. Here is the first duplicate question linked to the above original question:
###Code
print(dupes.query("AnswerId == 220233").iloc[0]["Text0"])
###Output
Monitoring http request header on a page. <blockquote> <p><strong>Possible Duplicates:</strong><br> <a href="http://stackoverflow.com/questions/220231/accessing-http-headers-in-javascript">Accessing HTTP Headers in Javascript?</a><br> <a href="http://stackoverflow.com/questions/220149/how-do-i-access-the-http-request-header-fields-via-javascript">How do I access the HTTP request header fields via JavaScript?</a> </p> </blockquote> <p>We can use httpwatch on IE or httpfox on Firefox to monitor http activity</p> <p>If i don't want to use any plugs on browser...</p> <p>Is it possible to monitor http request header on a page just by javascript?</p>
###Markdown
Pre-process Text Data: Clean up text. The raw data is in `HTML` format and needs to be cleaned up for any further analysis. We exclude HTML tags, links, and code snippets from the data.
###Code
# remove embedded code chunks, HTML tags and links/URLs.
def clean_text(text):
global EMPTY
EMPTY = ''
if not isinstance(text, str):
return text
text = re.sub('<pre><code>.*?</code></pre>', EMPTY, text)
def replace_link(match):
return EMPTY if re.match('[a-z]+://', match.group(1)) else match.group(1)
text = re.sub('<a[^>]+>(.*)</a>', replace_link, text)
return re.sub('<[^>]+>', EMPTY, text)
for df in (questions, dupes, answers):
df['Text'] = df['Text0'].apply(clean_text).str.lower()
df['NumChars'] = df['Text'].str.len()
###Output
_____no_output_____
###Markdown
Set data selection criteria. To obtain high-quality datasets for phrase learning and model training, we require a minimum length of characters in the text field. Different thresholds are considered for original questions, duplications, and answers, respectively. Also, each Q&A pair in our set must have a minimum of 3 additional semantically equivalent duplicate questions linked to it.
###Code
# find the AnswerIds has at least 3 dupes.
def find_answerId(answersC, dupesC, num_dupes):
countHash = {}
for i in dupesC.AnswerId:
if i not in answersC.index.values:
continue
if i not in countHash.keys():
countHash[i] = 1
else:
countHash[i] += 1
countHash = {k: v for k, v in countHash.items() if v >= num_dupes}
commonAnswerId = countHash.keys()
return commonAnswerId
# extract data based on the selection criteria.
def select_data(questions, dupes, answers):
# exclude the records without any text
questions_nz = questions.query('NumChars > 0')
dupes_nz = dupes.query('NumChars > 0')
answers_nz = answers.query('NumChars > 0')
# get the 10th percentile of text length as the minimum length of characters to consider in the text field
minLenQ = questions_nz.quantile(.1)['NumChars']
minLenD = dupes_nz.quantile(.1)['NumChars']
minLenA = answers_nz.quantile(.1)['NumChars']
# eliminate records with text less than the minimum length
questionsC = questions.query('NumChars >' + str(int(minLenQ)))
dupesC = dupes.query('NumChars >' + str(minLenD))
answersC = answers.query('NumChars >' + str(minLenA))
# remove the records in dupesC whose questionId has already existed in questionsC
duplicatedIndex = list(set(questionsC.index).intersection(set(dupesC.index)))
dupesC.drop(duplicatedIndex, inplace=True)
# make sure Questions 1:1 match with Answers
matches = questionsC.merge(answersC, left_on = 'AnswerId', right_index = True)
questionsC = matches[['AnswerId', 'Text0_x', 'CreationDate', 'Text_x', 'NumChars_x']]
questionsC.columns = ['AnswerId', 'Text0', 'CreationDate', 'Text', 'NumChars']
answersC = matches[['Text0_y', 'Text_y', 'NumChars_y']]
answersC.index = matches['AnswerId']
answersC.columns = ['Text0', 'Text', 'NumChars']
# find the AnswerIds that have at least 3 dupes
commonAnswerId = find_answerId(answersC, dupesC, 3)
# select the records with those AnswerIds
questionsC = questionsC.loc[questionsC.AnswerId.isin(commonAnswerId)]
dupesC = dupesC.loc[dupesC.AnswerId.isin(commonAnswerId)]
return questionsC, dupesC
# some questions have been linked to multiple AnswerIds.
# we keep the first AnswerId associated with that question and remove the rest.
questions = questions.groupby(questions.index).first()
dupes = dupes.groupby(dupes.index).first()
# execute the data selection function on questions, dupes and answers.
questionsC, dupesC = select_data(questions, dupes, answers)
###Output
_____no_output_____
###Markdown
Prepare Training and Test datasets. In this example, we retain the original question and 75% of the duplicate questions for training, and hold out the most recently posted 25% of duplicate questions as test data. The training and test data are split by `CreationDate`.- training set = Original questions + 75% of oldest Duplications per original question- test set = remaining 25% of Duplications per original question
###Code
# split Original questions and their Duplications into training and test sets.
def split_data(questions, dupes, frac):
trainQ = questions
testQ = pd.DataFrame(columns = dupes.columns.values) # create an empty data frame
for answerId in np.unique(dupes.AnswerId):
df = dupes.query('AnswerId == ' + str(answerId))
totalCount = len(df)
splitPoint = int(totalCount * frac)
dfSort = df.sort_values(by = ['CreationDate'])
trainQ = trainQ.append(dfSort.head(splitPoint)) # oldest N percent of duplications
testQ = testQ.append(dfSort.tail(totalCount - splitPoint))
# convert data type to int
testQ[["AnswerId", "NumChars"]] = testQ[["AnswerId", "NumChars"]].astype(int)
# rename the index
testQ.index.rename("Id", inplace=True)
return trainQ, testQ
trainQ, testQ = split_data(questionsC, dupesC, 0.75)
trainQ.head(5)
###Output
_____no_output_____
###Markdown
Select Subsets with Sufficient Training Questions per Answer Class. In our past experiments, we noticed that some Q&A pairs only link to a small number of duplicate questions. This means those answer classes may contain an insufficient number of examples to train an accurate model. We examine the effect of the number of duplicate questions available for training for each Q&A pair. The above Figure shows results for questions relative to the number of training examples available for the correct Q&A pair that should be returned. Most of our Q&A pairs (857 out of 1201) have 5 or fewer known duplicate questions available for training. Performance on these questions is relatively weak, with the correct Q&A pair landing in the top 10 results less than 40% of the time. However, when greater numbers of duplicate questions are available for training, performance improves dramatically; when Q&A pairs have 50 or more duplicate questions available for training, the classification model places these pairs in the top 10 of the retrieved results 98% of the time when they correctly match the query. The most duplicated question contains 962 duplications. For the study in this notebook, we only consider the answer classes that have more than 13 training questions (original and duplicate questions). This reduces the entire dataset to 5,153 training questions, 1,735 test questions, and 103 unique answer classes.
###Code
countPerAns = pd.DataFrame({"NumTrain" : trainQ.groupby("AnswerId").size()})
trainQwithCount = trainQ.merge(countPerAns, left_on="AnswerId", right_index=True)
testQwithCount = testQ.merge(countPerAns, left_on="AnswerId", right_index=True)
# for each Answer class, we require more than 13 training questions.
trainQ = trainQwithCount[trainQwithCount["NumTrain"] > 13]
testQ = testQwithCount[testQwithCount["NumTrain"] > 13]
print("# of training examples: " + str(len(trainQ)))
print("# of testing examples: " + str(len(testQ)) + "\n")
print("A quick glance of the training data: \n")
trainQ[["AnswerId", "Text"]].head(5)
###Output
# of training examples: 5153
# of testing examples: 1735
A quick glance of the training data:
###Markdown
Save Outputs to a Share Directory in the Workbench. Azure Machine Learning Workbench provides a flexible way of saving intermediate files. `os.environ.get('AZUREML_NATIVE_SHARE_DIRECTORY')` retrieves a share directory where the files are stored. Those files can be accessed from other notebooks or Python files.
###Code
workfolder = os.environ.get('AZUREML_NATIVE_SHARE_DIRECTORY')
trainQ.to_csv(os.path.join(workfolder, 'trainQ_part1'), sep='\t', header=True, index=True, index_label='Id')
testQ.to_csv(os.path.join(workfolder, 'testQ_part1'), sep='\t', header=True, index=True, index_label='Id')
###Output
_____no_output_____ |
Week3.ipynb | ###Markdown
Week 3: Real Time Data, Understanding Orders, and Arbitrage WebSocket Tutorial What is a WebSocket? A WebSocket is a persistent connection between a client and a server.- A WebSocket API has distinct differences from the RESTful APIs that we learned about earlier. - The main difference between the two is that WebSockets are stateful protocols. REST is stateless. - A server may send data to the client when data is updated with WebSockets, which is different from REST where the client must request the information via HTTP.- Due to different protocols being used for WebSockets than for REST (HTTP), there is less overhead. Additionally, getting data from a server no longer has to be a two-step process (Send a Request -> Wait for a Response) - For this reason, WebSockets are almost always more efficient performance-wise.- **Question** If WebSockets have better performance and simplify pulling data, why would we ever use REST? - The biggest reason is that HTTP requests are better supported throughout the web. More compatibility generally means easier integration. Why are WebSockets Important for Quant? Because real-time quote data is *extremely* important for algos, WebSocket APIs can offer a faster and more efficient method of data transfer.- We could *technically* use a RESTful API to pull quote data at regular intervals, but this would be inefficient. - **Remember** WebSockets are a *two-way street*. The web resource (server) can send data to the client and vice versa.- Less overhead also means more scalability, which could be important later down the road. Combining Python and WebSockets. Important Python libraries and modules for WebSockets :- **websocket** - a library that allows you to start a WebSocket connection. Final Remarks. WebSockets are not only useful, but almost necessary when creating an algo.- WebSocket APIs solve some of the pitfalls of REST APIs.- In applications where streaming data is important, WebSockets are the way to go.
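For contrast, a REST-based alternative would have to poll on a timer; a minimal sketch is below. The URL is a placeholder, not a real endpoint; it only illustrates the request/response overhead that the WebSocket connection in the next cell avoids.
```python
import time
import requests

# Hypothetical polling loop: every tick costs a full HTTP round trip.
while True:
    resp = requests.get("https://example.com/v1/quotes/AAPL")  # placeholder URL
    print(resp.status_code, resp.text[:200])
    time.sleep(1)  # poll once per second; anything faster gets wasteful quickly
```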
###Code
from config import *
import websocket
import json
import numpy as np
SOCKET_ENDPOINT = "wss://data.alpaca.markets/stream"
STOCKS = ["T.AAPL"]
# T. is trades (ie all trades executed by exchanges) Q. is quotes (what people are willing to buy/sell for)
# all websockets take 3 mandatory functions: on open, on message, and on close.
# On open gets called once when we try to connect
# on message gets called each time we get a new piece of data sent to us
# on close gets called when we close the connection (stop listening to data)
def on_open(ws):
# we have to send an authentication message
auth_data = {
"action": "authenticate",
"data": {
"key_id": f"{KEY}",
"secret_key": f"{SECRET_KEY}"
}
}
# authenticate our connection
ws.send(json.dumps(auth_data))
channel_data = {
"action": "listen",
"data": {"streams": STOCKS} # streams is what channels we want to subscribe to
}
ws.send(json.dumps(channel_data))
def on_message(ws, message):
# every time a message gets sent to us we have to define what to do. In this instance we'll just print it
# but we can get more complex by doing stuff like analyzing the high frequency data to make orders
print("received a message")
print(message)
def on_close(ws):
print("closed connection")
ws = websocket.WebSocketApp(SOCKET_ENDPOINT, on_open=on_open, on_message=on_message, on_close=on_close)
# you'll want to run this in a local terminal for it to work
ws.run_forever()
###Output
received a message
{"stream":"authorization","data":{"action":"authenticate","status":"unauthorized"}}
received a message
{"stream":"authorization","data":{"action":"listen","status":"unauthorized"}}
closed connection
###Markdown
Quote Data Tutorial What is a Quote? Stock quote data consists of basic statistics about a certain stock.- Quote data generally includes...: - Bid-Ask Spread. - Most recent order size. - Most recent stock price. - Volume. - Day's High/Low. - 52 Week High/Low. - and a lot more...- For the purposes of this course, we will be focusing on the *Bid-Ask Spread* and *Order Size*.- These statistics are especially important for traders and quantitative analysts who use price movement and stock trends to make decisions. What is a Bid-Ask Spread? A Bid-Ask Spread is the range of prices others are willing to buy or sell a security at.- A **Bid** is the maximum price someone is willing to buy a security.- An **Ask** is the minimum price someone is willing to sell their security. - Think of buying stocks as an auction--the seller wants to sell at the highest price possible and the buyer wants to buy at the lowest price possible. - Ultimately both parties are generally trying to reach a deal (a price they are both happy with).- A Bid-Ask Spread is affected by a multitude of factors... : - **Supply & Demand** : The Bid-Ask Spread is in a way a direct representation of the supply and demand of a security. - **Volume** : Related to Supply & Demand; higher volume generally means a smaller Bid-Ask Spread. - **Order Size** : Again related to Supply & Demand; if an order size is bigger, it will have more of an effect on the Bid-Ask Spread. - and more...- The Bid-Ask Spread is important for anyone involved in the financial markets. - The Bid-Ask Spread ultimately determines a security's price. - For stocks with high liquidity, the price of the stock is generally accepted to be the average of the bid price and ask price. - For stocks with low liquidity, the price you have to pay to buy a stock will be closer to the ask price and vice versa for when you want to sell a stock. - The Bid-Ask Spread is especially important for quantitative analysts who use High Frequency Trading strategies that utilize the Bid-Ask Spread. - The "Market Makers" make money by utilizing the difference between the bid price and the ask price. Data Types. Usually when we see quote data from exchanges, we get what is called **level 1 data**. This basically means that we only see one set of bid-ask data. - An example of level 1 quote data we might get from an exchange could look something like this:```{ticker: "AAPL", bid_price: 130, ask_price: 130.05, volume: 10}```. This is contrasted with **level 2 data**, which gives the volumes at different bid and ask prices. When people submit a limit buy order for a stock, there could be a range of prices within 30 cents. In level 1 data we would only see the best possible buy and sell prices, which hides the "market depth" at each time interval. In this course we'll only be dealing with level 1 data, but you should be aware that for any given tick the range of bids might extend 30 cents below the maximum bid price, and the range of asks could extend 30 cents above the minimum ask price. What is Order Size? Order size refers to the number of securities an order calls to buy or sell.- Order Size is important because it directly affects the Bid-Ask Spread and therefore the price of the stock. - Example : If you place a limit buy order for 50 shares of `AAPL`, you have effectively changed the price of `AAPL`! Though it would be completely insignificant given Apple's market cap...- Through the law of *Supply & Demand*, bigger order sizes will affect the price of a security more. 
- Example : If you place a limit sell order for 100k shares of a small cap penny stock, you will likely have a direct impact on its price.- **Note :** An order does *NOT* have to be filled in order to affect a security's price. As a matter of fact, an order and its order size no longer directly affect the security's price once it has been filled. This is very important to know, especially for quantitative analysis. - This means you can effectively manipulate a stock's price by consecutively placing and cancelling massive orders in one direction. This practice is called *price manipulation* and it becomes *illegal* past a certain point. - This practice caused the *2010 Flash Crash*. Since then more safeguards have been put in place to avoid this happening in the future. I saved some of the raw data I collected into a csv file; now let's analyze it. I ran this for maybe 30 minutes. As you can see we get a ton of data when we're operating on the tick frequency.
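Before loading the saved data, here is a minimal sketch (not from the original notebook) of working with a single level-1 quote; the `p`/`P`/`s`/`S` field names follow the Alpaca-style payload that the `analyze` function uses later in this tutorial.
```python
# Hypothetical level-1 quote: p/s = bid price/size, P/S = ask price/size
# (sizes in hundreds of shares, matching the csv columns described below).
quote = {"T": "AAPL", "p": 130.00, "s": 2, "P": 130.05, "S": 3}

spread = quote["P"] - quote["p"]      # bid-ask spread
mid = (quote["p"] + quote["P"]) / 2   # common price estimate for liquid stocks
print(f"spread={spread:.2f}, mid={mid:.3f}")
```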
###Code
import pandas as pd
import matplotlib.pyplot as plt
spyData = pd.read_csv("spy.csv")
# in case you don't know, SPY is an index fund composed of the S&P 500 companies, so it's generally a good
# indicator of overall market performance over the long term
spyData
# columns are as follows: ticker, bid exchange, bid price, bid size (in hundreds), ask exchange, ask price, ask size, unix time
# let's first see how bid and ask price changes over this interval
fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(17,7))
ax1.plot(spyData["p"], label="bid price")
ax1.set_xlabel("time")
ax1.set_ylabel("price (USD)")
ax1.set_title("bid price vs time")
ax1.legend()
ax2.plot(spyData["P"], label="ask price")
ax2.set_xlabel("time")
ax2.set_ylabel("price (USD)")
ax2.set_title("ask price vs time")
ax2.legend()
plt.show()
# another obvious one to look at is bid and ask size.
# As a quant we might believe there is some correlation between the
# number of shares being traded and a quick dip or gain in the price
fig, (ax1,ax2) = plt.subplots(nrows=1, ncols=2, figsize=(17,7))
ax1.plot(spyData["s"], label="bid size")
ax1.set_xlabel("time")
ax1.set_ylabel("Number of shares (hundreds)")
ax1.set_title("bid size vs time")
ax1.legend()
ax2.plot(spyData["S"], label="ask size")
ax2.set_xlabel("time")
ax2.set_ylabel("Number of shares (hundreds)")
ax2.set_title("ask size vs time")
ax2.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Let's think about this data. What first strikes me is the change of "density" in the bid and ask size. From the 8-12k time mark in the bid size we see a general increase in the number of shares being purchased (more blue) (we'll call this bid density), whereas in the 2-6k time mark in the ask size we see a general increase in the number of shares being sold (we'll call this ask density). The interesting thing is that the increase in bid density correlates with an increase in bid and ask price (around 15 cents), whereas the increasing ask density correlates with a decrease in bid and ask price (around 20 cents). Now this is only one example on a small portion of data, but with a bit of work we could easily turn this into a trading strategy where we buy or sell positions in SPY based on noticing whether this bid/ask density exhibits strong changes. We could even test this out on different stocks. SPY generally is not very volatile, but we could get bigger returns for bigger risk with more volatile assets. Analyzing data and noticing patterns is the name of the game in quant world. The next step would be to make some sort of mathematical definition of this bid-ask density so we can calculate it and correctly identify whether we want to enter or exit a position in a trading algorithm and test our results. So what do we do with this data? Answer: By getting data almost instantly (10ish milliseconds latency - but this will differ by location) from exchanges, we're among the first people in the markets to know what people are doing. Here are three key strategies that "high frequency traders" use to make money. We'll illustrate them with examples then get a bit more technical. Market Making: Say you go to a farmer's market and want to buy some apples. You see that farmers are selling apples for 1.00 (ask price) a piece, but suppose you heard people waiting in line to get in saying they're willing to buy apples for 1.05 (bid price) a piece. With this information, you run ahead of everyone else and buy the apples at their market price of 1.00 then run back to the people in line and sell your apples for 1.05 a piece. In essence you are able to act faster than everyone else and use that to your advantage. By doing so you're rewarded with a risk-free(ish) profit of 5 cents apiece. In the real market scenario, you have algorithms watching quotes from exchanges and if there is a sufficiently large bid-ask spread, you'll purchase the shares for the bid price then immediately sell them at the ask price, being rewarded with the spread. The issue here is that if someone sells shares to those buyers faster than you, then you've got shares you don't want to have and no one to buy them from you. Arbitrage: Say you are at the same farmers market and can purchase apples for 1.00 a piece, but you know there's another farmers market across town with people buying apples at 1.20 a piece. You purchase the apples, drive across town, then dump your apples. In a trading scenario, you can purchase shares of Apple stock at various stock exchanges, say the NYSE and the Hong Kong Stock Exchange. We would expect these prices to be very close, but occasionally the price may differ by more than a few cents. Then we can purchase shares at the cheaper exchange and sell them at the more expensive exchange and pocket the spread. Trade Following (more falls into statarb): I can't think of a good example for this, so we'll go straight into the market example. 
Say we're watching trade streams from an exchange and notice that a huge number of shares are trying to be sold, which might be a big bank liquidating its position. We believe that this liquidation will cause a quick dip in share price (higher supply, same demand), so we quickly short the stock and hope the price does what we want. Order Types Tutorial What is an Order? Like the name implies, an order is a request to buy or sell a security.- There are 2 main types of orders : - **Market Order :** Used to purchase or sell a security regardless of the price. Best option when you would like to have your order fulfilled regardless of price fluctuations. - **Warning :** A market order can be "dangerous" when the security has low volume (liquidity) because of the wide bid-ask spread. Additionally, you are less likely to get the "best price" despite being more likely to have your order filled. - **Note :** Many brokerages do *not* allow you to use market orders during premarket and after-hours sessions, because the low liquidity relative to regular market hours can cause massive fluctuations in price. - **Limit Order :** Used to purchase or sell a security above or below a certain price. This is the most common order type. - **Note :** You are less likely to have your order filled if you use a limit order. This is because you are bidding or asking a certain price, which the market may or may not agree with.- Depending on the security's liquidity and how much you need your order to get filled, you should pick which order type to use accordingly.- **Note :** A market order where you sell a security is referred to as a *market sell order*, a limit order where you buy a security is referred to as a *limit buy order*, etc. What is a Stop Loss? A stop loss is a conditional order that is automatically executed by your brokerage if a certain condition that you set is met.- Most commonly a stop loss is a *conditional market sell order* that is executed when a certain security goes below a predetermined value. - Example : If you set a stop loss for a certain stock you own at $50.50, a *market sell order* will be executed if the stock dips below $50.50. - **Warning :** Because a stop loss is ultimately a *market order*, you must be careful when you use it.- There are other types of stop losses such as *trailing stop losses*, *stop limits*, etc. For the purposes of this course, you don't have to know all of them, but they are mostly self-explanatory. (A minimal stop-loss sketch appears after the example scenarios below.) Example Scenarios **Disclaimer : These are informative examples, and shouldn't be treated as investment advice or recommendations. If you choose to trade in the financial markets, you are liable for the decisions that you make.**Example 1 : You would like to buy Apple stock `NASDAQ:AAPL` because you are *confident* that the stock will go up at open. What order type should you use?- Because `AAPL` is a highly liquid stock and you are *confident* that the stock will go up, you would more likely want to use a *market buy order*.Example 2 : You would like to daytrade Hertz stock `OTCMKTS:HTZGQ` which has relatively low volume and is a penny stock. What order type should you use?- Because `HTZGQ` is a relatively illiquid stock with high volatility, you would more likely want to use a *limit order*.Example 3 : You would like to buy Amazon stock `NASDAQ:AMZN` because you believe that Amazon is a good long term investment. What order type should you use?- Trick question. 
There is no correct answer, but you would generally want to use *limit buy orders* unless you have a very good reason to use a *market buy order*. Example 4 : You have a few shares of Tesla `NASDAQ:TSLA` and you are afraid that the stock will tank. What order type should you use to protect your gains?- Another trick question. Ideally you would use a *limit sell order* to sell your stocks when you cannot take any more risk, but it would also be valid to use a *stop loss*. All in all, you generally want to use more *limit orders* than *market orders* or *stop losses* because you have more control over your capital and your securities. We'll code up a quick market maker algo
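As referenced in the stop-loss section above, here is a minimal stop-loss sketch. It reuses the `make_order` helper imported in the next cell, but the exact argument list for a market order is an assumption; the tutorial only shows `make_order` with limit orders.
```python
from setup_requests import make_order  # same helper the market-maker cell imports

STOP_PRICE = 50.50   # hypothetical threshold
position_open = True

def check_stop_loss(asset, last_price, qty):
    # last_price would come from a trade ("T.") stream message
    global position_open
    if position_open and last_price < STOP_PRICE:
        # a stop loss is ultimately a conditional market sell order
        make_order(asset, qty, "sell", "market", "day")  # assumed argument list
        position_open = False
        print(f"stop loss triggered: selling {qty} shares of {asset}")
```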
###Code
import json
from setup_requests import make_order
# the idea here is that we'll watch quote streams then try to buy at the bid price and sell at the ask price
def analyze(ws, message):
json_dict = json.loads(message)
quote = json_dict["data"]
if quote["ev"] == "Q":
asset = quote["T"]
bid_price = quote["p"]
bid_size = quote["s"]
ask_price = quote["P"]
ask_size = quote["S"]
# naive approach: trade if the bid-ask spread is at least $.05
spread = quote["P"] - quote["p"]
# we want a reasonably large spread so we make good money for the risk
if spread >= .05 and bid_size > ask_size:
order_size = 1
# buy at the bid and sell at the ask so the spread becomes our profit
buy_order = make_order(asset, order_size, "buy", "limit", "fok", bid_price)
if buy_order["status"] == "accepted":
print(f"bought {order_size} of {asset} at {bid_price}")
sell_order = make_order(asset, order_size, "sell", "limit", "fok", ask_price)
if sell_order["status"] == "accepted":
print(f"sold {order_size} of {asset} at {ask_price}")
###Output
_____no_output_____
###Markdown
Now to test this out, we just make a quick change to our websocket.
###Code
def on_open(ws):
# we have to send an authentication message
auth_data = {
"action": "authenticate",
"data": {
"key_id": f"{KEY}",
"secret_key": f"{SECRET_KEY}"
}
}
# authenticate our connection
ws.send(json.dumps(auth_data))
channel_data = {
"action": "listen",
"data": {"streams": STOCKS} # streams is what channels we want to subscribe to
}
ws.send(json.dumps(channel_data))
def on_message(ws, message):
analyze(ws, message)
def on_close(ws):
print("closed connection")
ws = websocket.WebSocketApp(SOCKET_ENDPOINT, on_open=on_open, on_message=on_message, on_close=on_close)
ws.run_forever()
###Output
closed connection
###Markdown
Practice Quiz: While Loops **Problem:** Fill in the blanks to make the print_prime_factors function print all the prime factors of a number. A prime factor is a number that is prime and divides another without a remainder.
###Code
def print_prime_factors(number):
# Start with two, which is the first prime
factor = 2
# Keep going until the factor is larger than the number
while factor <= number:
# Check if factor is a divisor of number
if number % factor == 0:
# If it is, print it and divide the original number
print(factor)
number = number / factor
else:
# If it's not, increment the factor by one
factor += 1
return "Done"
print_prime_factors(100)
# Should print 2,2,5,5
# DO NOT DELETE THIS COMMENT
###Output
2
2
5
5
###Markdown
**Problem:** The following code can lead to an infinite loop. Fix the code so that it can finish successfully for all numbers.Note: Try running your function with the number 0 as the input, and see what you get!
###Code
def is_power_of_two(n):
# Check if the number can be divided by two without a remainder
if n == 0:
return False
while n % 2 == 0:
n = n / 2
# If after dividing by two the number is 1, it's a power of two
if n == 1:
return True
return False
print(is_power_of_two(0)) # Should be False
print(is_power_of_two(1)) # Should be True
print(is_power_of_two(8)) # Should be True
print(is_power_of_two(9)) # Should be False
###Output
False
True
True
False
###Markdown
**Problem:** Fill in the empty function so that it returns the sum of all the divisors of a number, without including it. A divisor is a number that divides into another without a remainder.
###Code
import math
def sum_divisors(n):
sum = 1
# Return the sum of all divisors of n, not including n
div = 2
if n == 0:
return 0
while div < n/2+1:
if n % div == 0:
sum += div
div += 1
return sum
print(sum_divisors(6)) # Should be 6 (sum of 1+2+3)
print(sum_divisors(12)) # Should be 16 (sum of 1+2+3+4+6)
###Output
6
16
###Markdown
**Problem:** The multiplication_table function prints the results of a number passed to it multiplied by 1 through 5. An additional requirement is that the result is not to exceed 25, which is done with the break statement. Fill in the blanks to complete the function to satisfy these conditions.
###Code
def multiplication_table(number):
# Initialize the starting point of the multiplication table
multiplier = 1
# Only want to loop through 5
while multiplier <= 5:
result = number*multiplier
# What is the additional condition to exit out of the loop?
if result>25:
break
print(str(number) + "x" + str(multiplier) + "=" + str(result))
# Increment the variable for the loop
multiplier += 1
multiplication_table(3)
# Should print: 3x1=3 3x2=6 3x3=9 3x4=12 3x5=15
multiplication_table(5)
# Should print: 5x1=5 5x2=10 5x3=15 5x4=20 5x5=25
multiplication_table(8)
# Should print: 8x1=8 8x2=16 8x3=24
###Output
3x1=3
3x2=6
3x3=9
3x4=12
3x5=15
5x1=5
5x2=10
5x3=15
5x4=20
5x5=25
8x1=8
8x2=16
8x3=24
###Markdown
Practice Quiz: For Loops **Problem:** Fill in the blanks to make the factorial function return the factorial of n. Then, print the first 10 factorials (from 0 to 9) with the corresponding number. Remember that the factorial of a number is defined as the product of an integer and all integers before it. For example, the factorial of five (5!) is equal to 1*2*3*4*5=120. Also recall that the factorial of zero (0!) is equal to 1.
###Code
def factorial(n):
result = 1
for x in range(2,n+1):
result *= x
return result
for n in range(10):
print(n, factorial(n))
###Output
0 1
1 1
2 2
3 6
4 24
5 120
6 720
7 5040
8 40320
9 362880
###Markdown
**Problem:** Write a script that prints the first 10 cube numbers (x**3), starting with x=1 and ending with x=10.
###Code
for x in range(1,11):
print(x**3)
###Output
1
8
27
64
125
216
343
512
729
1000
###Markdown
**Problem:** Write a script that prints the multiples of 7 between 0 and 100. Print one multiple per line and avoid printing any numbers that aren't multiples of 7. Remember that 0 is also a multiple of 7.
###Code
for i in range(0,101,7):
print(i)
###Output
0
7
14
21
28
35
42
49
56
63
70
77
84
91
98
###Markdown
**Problem:** The retry function tries to execute an operation that might fail; it retries the operation for a number of attempts. Currently the code will keep executing the function even if it succeeds. Modify the code so that it stops trying after the operation succeeded.
```python
def retry(operation, attempts):
  for n in range(attempts):
    if operation():
      print("Attempt " + str(n) + " succeeded")
      break
    else:
      print("Attempt " + str(n) + " failed")

retry(create_user, 3)
retry(stop_service, 5)
```
N.B. The parameters to retry() are passed by Coursera, so I can't run it in this cell. Practice Quiz: Recursion **Problem:** Fill in the blanks to make the is_power_of function return whether the number is a power of the given base. Note: base is assumed to be a positive number. Tip: for functions that return a boolean value, you can return the result of a comparison.
###Code
def is_power_of(number, base):
# Base case: when number is smaller than base.
if number < base:
# If number is equal to 1, it's a power (base**0).
return number == 1
# Recursive case: keep dividing number by base.
return is_power_of(number/base, base)
print(is_power_of(8,2)) # Should be True
print(is_power_of(64,4)) # Should be True
print(is_power_of(70,10)) # Should be False
###Output
True
True
False
###Markdown
**Problem:** The count_users function recursively counts the number of users that belong to a group in the company system, by going through each of the members of a group and if one of them is a group, recursively calling the function and counting the members. But it has a bug! Can you spot the problem and fix it?
```python
def count_users(group):
  count = 0
  for member in get_members(group):
    count += 1
    if is_group(member):
      count += count_users(member) - 1
  return count

print(count_users("sales"))       # Should be 3
print(count_users("engineering")) # Should be 8
print(count_users("everyone"))    # Should be 18
```
N.B. The parameters to count_users() are passed by Coursera, so I can't run it in this cell. **Problem:** Implement the sum_positive_numbers function, as a recursive function that returns the sum of all positive numbers between the number n received and 1. For example, when n is 3 it should return 1+2+3=6, and when n is 5 it should return 1+2+3+4+5=15.
###Code
def sum_positive_numbers(n):
sum = 0
if n == 1:
return 1
else:
sum += n + sum_positive_numbers(n-1)
return sum
print(sum_positive_numbers(3)) # Should be 6
print(sum_positive_numbers(5)) # Should be 15
###Output
6
15
###Markdown
Module 3 Graded Assessment **Problem:** Fill in the blanks of this code to print out the numbers 1 through 7.
###Code
number = 1
while number <= 7:
print(number, end=" ")
number += 1
###Output
1 2 3 4 5 6 7
###Markdown
**Problem:** The show_letters function should print out each letter of a word on a separate line. Fill in the blanks to make that happen.
###Code
def show_letters(word):
for letter in word:
print(letter)
show_letters("Hello")
# Should print one line per letter
###Output
H
e
l
l
o
###Markdown
**Problem:** Complete the function digits(n) that returns how many digits the number has. For example: 25 has 2 digits and 144 has 3 digits. Tip: you can figure out the digits of a number by dividing it by 10 once per digit until there are no digits left.
###Code
import math
def digits(n):
count = 0
if n == 0:
return 1
while (n>0):
count += 1
n = math.floor(n/10)
return count
print(digits(25)) # Should print 2
print(digits(144)) # Should print 3
print(digits(1000)) # Should print 4
print(digits(0)) # Should print 1
###Output
2
3
4
1
###Markdown
**Problem:** This function prints out a multiplication table (where each number is the result of multiplying the first number of its row by the number at the top of its column). Fill in the blanks so that calling multiplication_table(1, 3) will print out:
1 2 3
2 4 6
3 6 9
###Code
def multiplication_table(start, stop):
for x in range(start, stop+1):
for y in range(start, stop+1):
print(x*y, end=" ")
print()
multiplication_table(1, 3)
# Should print the multiplication table shown above
###Output
1 2 3
2 4 6
3 6 9
###Markdown
**Problem:** The counter function counts down from start to stop when start is bigger than stop, and counts up from start to stop otherwise. Fill in the blanks to make this work correctly.
###Code
def counter(start, stop):
x = start
if start > stop:
return_string = "Counting down: "
while x >= stop:
return_string += str(x)
if x != stop:
return_string += ","
x -= 1
else:
return_string = "Counting up: "
while x <= stop:
return_string += str(x)
if x != stop:
return_string += ","
x += 1
return return_string
print(counter(1, 10)) # Should be "Counting up: 1,2,3,4,5,6,7,8,9,10"
print(counter(2, 1)) # Should be "Counting down: 2,1"
print(counter(5, 5)) # Should be "Counting up: 5"
###Output
Counting up: 1,2,3,4,5,6,7,8,9,10
Counting down: 2,1
Counting up: 5
###Markdown
**Problem:** The loop function is similar to range(), but handles the parameters somewhat differently: it takes in 3 parameters: the starting point, the stopping point, and the increment step. When the starting point is greater than the stopping point, it forces the steps to be negative. When, instead, the starting point is less than the stopping point, it forces the step to be positive. Also, if the step is 0, it changes to 1 or -1. The result is returned as a one-line, space-separated string of numbers. For example, loop(11,2,3) should return 11 8 5 and loop(1,5,0) should return 1 2 3 4. Fill in the missing parts to make that happen.
###Code
def loop(start, stop, step):
return_string = ""
if step == 0:
step = 1
if start > stop:
step = abs(step) * -1
else:
step = abs(step)
for count in range(start,stop,step):
return_string += str(count) + " "
return return_string.strip()
print(loop(11,2,3)) # Should be 11 8 5
print(loop(1,5,0)) # Should be 1 2 3 4
print(loop(-1,-2,0)) # Should be -1
print(loop(10,25,-2)) # Should be 10 12 14 16 18 20 22 24
print(loop(1,1,1)) # Should be empty
###Output
11 8 5
1 2 3 4
-1
10 12 14 16 18 20 22 24
|
COVID-19-analsyis.ipynb | ###Markdown
COVID-19 Analysis
###Code
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import datetime
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
bingurl = 'https://raw.githubusercontent.com/microsoft/Bing-COVID-19-Data/master/data/Bing-COVID19-Data.csv'
bing = pd.read_csv(bingurl,index_col=0,parse_dates=[1])
df=bing
# df=bing[bing.Updated >= '3/01/2020']
# subset to only US and WW Region level without sub regions
df=df[df['Country_Region'].isin(['United States','Worldwide']) & df.AdminRegion1.isna()]  # combine masks to avoid chained boolean indexing
# create rolling 7 day average
df['ConfirmedChangeRolling'] = df.groupby('Country_Region')['ConfirmedChange'].rolling(7).mean().reset_index(0,drop=True)
df['DeathsChangeRolling'] = df.groupby('Country_Region')['DeathsChange'].rolling(7).mean().reset_index(0,drop=True)
df['RecoveredChangeRolling'] = df.groupby('Country_Region')['RecoveredChange'].rolling(7).mean().reset_index(0,drop=True)
df =df[df.Updated >= '3/01/2020']
USdf = df[df['Country_Region'].isin(['United States'])].copy()
USdf.index = USdf.Updated
WWdf = df[df['Country_Region'].isin(['Worldwide'])].copy()
WWdf.index = WWdf.Updated
fig = px.bar(USdf, x='Updated', y='ConfirmedChange')
fig.add_trace(go.Scatter(x=USdf.Updated, y=USdf.ConfirmedChangeRolling,
mode='lines',
name='7 Day Rolling Average'))
fig.update_layout(
height=800,
xaxis_title="Time",
yaxis_title="Daily Change in Confirmed Cases",
title_text='USA Change in Confirmed COVID-19 Cases'
)
# fig.show()
iplot(fig)
fig = px.bar(WWdf, x='Updated', y='ConfirmedChange')
fig.add_trace(go.Scatter(x=WWdf.Updated, y=WWdf.ConfirmedChangeRolling,
mode='lines',
name='7 Day Rolling Average'))
fig.update_layout(
height=800,
xaxis_title="Time",
yaxis_title="Daily Change in Confirmed Cases",
title_text='Worldwide Change in Confirmed COVID-19 Cases'
)
# fig.show()
iplot(fig)
# DeathsChange data has some odd dips on certain Sundays, likely due to delays in getting new numbers
# or some other data management error. Using the rolling 7 day average makes for a cleaner trend
fig = px.line(df[df.AdminRegion1.isna()], x="Updated", y="DeathsChangeRolling", color='Country_Region')
fig.update_layout(
height=600,
xaxis_title="Time",
yaxis_title="Daily Change in Deaths",
title_text='Change in COVID-19 Deaths (USA vs World)'
)
# fig.show()
iplot(fig)
###Output
_____no_output_____ |
nbs/course2020/vision/01_Introduction.ipynb | ###Markdown
Lesson 1 - Introduction Lesson Video:
###Code
#hide_input
from IPython.lib.display import YouTubeVideo
YouTubeVideo('bw4PRyxa-y4')
###Output
_____no_output_____ |
Notebooks/M03 - Feature Engineering.ipynb | ###Markdown
IMPORTS
###Code
import math
import pandas as pd
import inflection
import numpy as np
import seaborn as sns
from IPython.core.display import HTML
from matplotlib import pyplot as plt
from IPython.display import Image
import datetime
from matplotlib import gridspec
###Output
/home/valcilio/.pyenv/versions/3.8.0/envs/Python_Do_0_ao_DS/lib/python3.8/site-packages/pandas/compat/__init__.py:97: UserWarning: Could not import the lzma module. Your installed Python is incomplete. Attempting to use lzma compression will result in a RuntimeError.
warnings.warn(msg)
###Markdown
0.1. Helper Functions 0.2. Loading data
###Code
df_sales_raw = pd.read_csv('data/train.csv', low_memory=False)
df_store_raw = pd.read_csv('data/store.csv', low_memory=False)
# merge
df_raw = pd.merge(df_sales_raw, df_store_raw, how='left', on='Store')
###Output
_____no_output_____
###Markdown
1.0. DATA DESCRIPTION 1.1. Rename Columns
###Code
df1 = df_raw.copy()
cols_old = ['Store', 'DayOfWeek', 'Date', 'Sales', 'Customers', 'Open', 'Promo',
'StateHoliday', 'SchoolHoliday', 'StoreType', 'Assortment',
'CompetitionDistance', 'CompetitionOpenSinceMonth',
'CompetitionOpenSinceYear', 'Promo2', 'Promo2SinceWeek',
'Promo2SinceYear', 'PromoInterval']
snakecase = lambda x: inflection.underscore( x )
cols_new = list(map( snakecase, cols_old))
# rename
df1.columns = cols_new
###Output
_____no_output_____
###Markdown
1.2. Data Dimensions
###Code
print('Number of Rows: {}'.format(df1.shape[0]))
print('Number of Cols: {}'.format(df1.shape[1]))
###Output
Number of Rows: 1017209
Number of Cols: 18
###Markdown
1.3. Data Types
###Code
df1['date'] = pd.to_datetime(df1['date'])
df1.dtypes
###Output
_____no_output_____
###Markdown
1.4. Check NA
###Code
df1.isna().sum()
###Output
_____no_output_____
###Markdown
1.5. Fillout NA
###Code
df1['competition_distance'].max()
#competition_distance
df1['competition_distance'] = df1['competition_distance'].apply(lambda x: 200000.0
if math.isnan(x) else x)
#competition_open_since_month
df1['competition_open_since_month'] = df1.apply(lambda x: x['date'].month if math.isnan(x['competition_open_since_month'])
else x['competition_open_since_month'], axis=1)
#competition_open_since_year
df1['competition_open_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['competition_open_since_year'])
else x['competition_open_since_year'], axis=1)
#promo2_since_week
df1['promo2_since_week'] = df1.apply(lambda x: x['date'].week if math.isnan(x['promo2_since_week'])
else x['promo2_since_week'], axis=1)
#promo2_since_year
df1['promo2_since_year'] = df1.apply(lambda x: x['date'].year if math.isnan(x['promo2_since_year'])
else x['promo2_since_year'], axis=1)
#promo_interval
month_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sept', 10: 'Oct', 11: 'Nov', 12: 'Dec'}  # the dataset's promo_interval strings use 'Feb' and 'Sept'
df1['promo_interval'].fillna(0, inplace=True)
df1['month_map'] = df1['date'].dt.month.map(month_map)
df1['is_promo'] = df1[['promo_interval', 'month_map']].apply(lambda x: 0 if x['promo_interval'] == 0 else 1
if x['month_map'] in x['promo_interval'].split(',') else 0, axis=1)
df1.sample(5).T
df1.isna().sum()
###Output
_____no_output_____
###Markdown
1.6. Change Types
###Code
df1.dtypes
df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int)
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)
df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)
df1.dtypes
###Output
_____no_output_____
###Markdown
1.7. Descriptive Statistical
###Code
num_attributes = df1.select_dtypes(include=['int64', 'float64'])
cat_attributes = df1.select_dtypes(exclude=['int64', 'float64', 'datetime64[ns]'])
df1.isna().sum()
# Central Tendency - mean, median
ct1 = pd.DataFrame(num_attributes.apply(np.mean)).T
ct2 = pd.DataFrame(num_attributes.apply(np.median)).T
# Dispersion - std, min, max, range, skew, kurtosis
d1 = pd.DataFrame(num_attributes.apply(np.std)).T
d2 = pd.DataFrame(num_attributes.apply(min)).T
d3 = pd.DataFrame(num_attributes.apply(max)).T
d4 = pd.DataFrame(num_attributes.apply(lambda x: x.max() - x.min())).T
d5 = pd.DataFrame(num_attributes.apply(lambda x: x.skew())).T
d6 = pd.DataFrame(num_attributes.apply(lambda x: x.kurtosis())).T
# concatenate
m = pd.concat([d2, d3, d4, ct1, ct2, d1, d5, d6]).T.reset_index()
m.columns = ['attributes', 'min', 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtosis']
m
sns.distplot(df1['competition_distance'])
###Output
_____no_output_____
###Markdown
1.7.2 Categorical Attributes
###Code
cat_attributes.apply(lambda x: x.unique().shape[0])
aux1 = df1[(df1['state_holiday'] != '0') & (df1['sales'] > 0)]
plt.figure(figsize=(16, 9))
plt.subplot(1, 3, 1)
sns.boxplot(x='state_holiday', y='sales', data=aux1)
plt.subplot(1, 3, 2)
sns.boxplot(x='store_type', y='sales', data=aux1)
plt.subplot(1, 3, 3)
sns.boxplot(x='assortment', y='sales', data=aux1)
###Output
_____no_output_____
###Markdown
2.0. Step 02 - Feature Engineering
###Code
df2 = df1.copy()
###Output
_____no_output_____
###Markdown
2.1. Hypothesis Mind Map
###Code
Image('mindmap.png')
###Output
_____no_output_____
###Markdown
2.1. Hypothesis Creation 2.1.1. Store Hypotheses **1.** Stores with more employees should sell more.**2.** Stores with larger stock capacity should sell more.**3.** Larger stores should sell more.**4.** Stores with a larger assortment should sell more.**5.** Stores with closer competitors should sell less.**6.** Stores with longer-established competitors should sell more. 2.1.2. Product Hypotheses **1.** Stores that invest more in marketing should sell more.**2.** Stores with greater product exposure should sell more.**3.** Stores with lower-priced products should sell more.**5.** Stores with more aggressive promotions (bigger discounts) should sell more.**6.** Stores with promotions active for longer should sell more.**7.** Stores with more promotion days should sell more.**8.** Stores with more consecutive promotions should sell more. 2.1.3. Time Hypotheses **1.** Stores open during the Christmas holiday should sell more.**2.** Stores should sell more over the years.**3.** Stores should sell more in the second half of the year.**4.** Stores should sell more after the 10th of each month.**5.** Stores should sell less on weekends.**6.** Stores should sell less during school holidays. 2.1.4. Final List of Hypotheses **1.** Stores with a larger assortment should sell more.**2.** Stores with closer competitors should sell less.**3.** Stores with longer-established competitors should sell more. **4.** Stores with promotions active for longer should sell more.**5.** Stores with more promotion days should sell more.**7.** Stores with more consecutive promotions should sell more. **8.** Stores open during the Christmas holiday should sell more.**9.** Stores should sell more over the years.**10.** Stores should sell more in the second half of the year.**11.** Stores should sell more after the 10th of each month.**12.** Stores should sell less on weekends.**13.** Stores should sell less during school holidays. 2.3. Feature Engineering
###Code
# year
df2['year'] = df2['date'].dt.year
# month
df2['month'] = df2['date'].dt.month
# day
df2['day'] = df2['date'].dt.day
# week of year
df2['week_of_year'] = df2['date'].dt.weekofyear
# year week
df2['year_week'] = df2['date'].dt.strftime('%Y-%W')
# competition since
df2['competition_since'] = df2.apply(lambda x: datetime.datetime(year=x['competition_open_since_year'], month=x['competition_open_since_month'], day=1), axis=1)
df2['competition_time_month'] = ((df2['date'] - df2['competition_since'])/30).apply(lambda x: x.days).astype(int)
# promo since
df2['promo_since'] = df2['promo2_since_year'].astype(str) + '-' + df2['promo2_since_week'].astype(str)
df2['promo_since'] = df2['promo_since'].apply(lambda x: datetime.datetime.strptime(x + '-1', '%Y-%W-%w') - datetime.timedelta(days=7))
df2['promo_time_week'] = ((df2['date'] - df2['promo_since'])/7).apply(lambda x: x.days).astype(int)
# assortment
df2['assortment'] = df2['assortment'].apply(lambda x: 'basic' if x == 'a' else 'extra' if x == 'b' else 'extended')
# state holiday
df2['state_holiday'] = df2['state_holiday'].apply(lambda x: 'public_holiday' if x == 'a' else 'easter_holiday' if x == 'b' else 'christmas' if x == 'c' else 'regular_day')
df2.head().T
###Output
_____no_output_____
###Markdown
3.0. STEP 03 - VARIABLE FILTERING
###Code
df3 = df2.copy()
df3.head()
###Output
_____no_output_____
###Markdown
3.1. Row Filtering
###Code
df3 = df3[(df3['open'] != 0) & (df3['sales'] > 0)]
###Output
_____no_output_____
###Markdown
3.2. Column Selection
###Code
cols_drop = ['customers', 'open', 'promo_interval', 'month_map']
df3 = df3.drop(cols_drop, axis=1)
df3.columns
###Output
_____no_output_____ |
datasets/zoom_test/zoom_smfish_0.ipynb | ###Markdown
Objective: This notebook runs the SpotAnnotationAnalysis pipeline on the results of smfish_0.png from the smfish zoom test. I/O: - In: json file containing worker annotations from Quantius. - Out: pandas dataframe containing reliable* cluster centroids.
###Code
from SpotAnnotationAnalysis import SpotAnnotationAnalysis
from QuantiusAnnotation import QuantiusAnnotation
import util
import math
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Instantiate a BaseAnnotation object and a SpotAnnotationAnalysis object.
###Code
json_filepath = '/Users/jenny.vo-phamhi/Documents/FISH-annotation/Annotation/zoom_sm_fish_test.json'
img_name = 'smfish_0'
img_filename = img_name + '.png'
img_filepath = '/Users/jenny.vo-phamhi/Documents/FISH-annotation/Annotation/zoom_test/smfish_0.png'
csv_filepath = None
qa = QuantiusAnnotation(json_filepath, img_filename)
sa = SpotAnnotationAnalysis(qa)
anno_all = qa.df()
###Output
_____no_output_____
###Markdown
Cluster annotations.
###Code
clustering_params = ['AffinityPropagation', -700]
clusters = sa.get_clusters(anno_all, clustering_params)
util.print_head(clusters)
img_height = anno_all['height'].values[0]
show_workers = True
show_centroids = True
x_bounds, y_bounds, show_ref_points = None, None, False
worker_marker_size, cluster_marker_size, ref_marker_size, bigger_window_size = 8, 40, None, True
plot_title = 'All clusters'
util.visualize_clusters(clusters, show_workers, show_centroids, show_ref_points, worker_marker_size, cluster_marker_size, ref_marker_size, csv_filepath, img_filepath, img_height, x_bounds, y_bounds, plot_title, bigger_window_size)
###Output
_____no_output_____
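###Markdown
For orientation, here is a rough sketch of what `sa.get_clusters` presumably does with `clustering_params = ['AffinityPropagation', -700]`; the coordinate column names `x` and `y` are assumptions, and the real logic lives inside `SpotAnnotationAnalysis`:
```python
import numpy as np
from sklearn.cluster import AffinityPropagation

coords = anno_all[['x', 'y']].to_numpy()    # assumed annotation coordinate columns
af = AffinityPropagation(preference=-700).fit(coords)
labels = af.labels_                         # cluster id per annotation
centroids = af.cluster_centers_             # one exemplar (spot estimate) per cluster
```
A more negative `preference` yields fewer, larger clusters, which is why it is exposed as the second element of `clustering_params`.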
###Markdown
Get large clusters Sort clusters by number of unique annotators. Visualize cluster size threshold.
###Code
cluster_size_threshold = util.get_cluster_size_threshold(clusters)
small_clusters, large_clusters = util.sort_clusters_by_size(clusters, cluster_size_threshold)
util.plot_cluster_size_threshold(clusters, cluster_size_threshold)
plot_title = 'Clusters with at least ' + str(math.ceil(cluster_size_threshold)) + ' unique annotators'
util.visualize_clusters(large_clusters, show_workers, show_centroids, show_ref_points, worker_marker_size, cluster_marker_size, ref_marker_size, csv_filepath, img_filepath, img_height, x_bounds, y_bounds, plot_title, bigger_window_size)
###Output
_____no_output_____
###Markdown
Get clumpy clusters Sort clusters by fraction of unique annotators who contribute more than once and visualize the clumpiness threshold.
###Code
clumpiness_threshold = util.plot_clumpiness_threshold(large_clusters)
clumpy_clusters, nonclumpy_clusters = util.sort_clusters_by_clumpiness(large_clusters, clumpiness_threshold)
###Output
_____no_output_____
###Markdown
Plot clusters identified as "clumpy."
###Code
show_workers = True
show_centroids = True
plot_title = 'Clumpy clusters, e.g. clusters where at least ' + str(math.floor(100*(1-clumpiness_threshold))) + '% of unique annotators contributed more than once'
util.visualize_clusters(clumpy_clusters, show_workers, show_centroids, show_ref_points, worker_marker_size, cluster_marker_size, ref_marker_size, csv_filepath, img_filepath, img_height, x_bounds, y_bounds, plot_title, bigger_window_size)
###Output
_____no_output_____
###Markdown
Partition clumpy clusters and add the results of partitioning to the dataframe of large, nonclumpy clusters.
###Code
declumping_params = ['KMeans', 2]
result_clusters = nonclumpy_clusters
for i in range(len(clumpy_clusters.index)):
    # split clumpy cluster i into subclusters and collect the results
subclusters = util.declump(clumpy_clusters, i, declumping_params)
result_clusters = pd.concat([subclusters, result_clusters], ignore_index=True)
###Output
_____no_output_____
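###Markdown
A hedged sketch of the idea behind `declumping_params = ['KMeans', 2]`: splitting one clumpy cluster's annotations into two subclusters (the actual behavior is implemented in `util.declump`):
```python
import numpy as np
from sklearn.cluster import KMeans

pts = np.array([[10.0, 12.0], [11.0, 13.0], [40.0, 42.0], [41.0, 41.0]])  # toy clumpy cluster
km = KMeans(n_clusters=2, n_init=10).fit(pts)
print(km.labels_)           # subcluster assignment per annotation
print(km.cluster_centers_)  # one new centroid per subcluster
```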
###Markdown
Visualize all resulting clusters (including original nonclumpy and newly declumped).
###Code
plot_title = 'All large clusters. Clumpy clusters have been declumped.'
util.visualize_clusters(result_clusters, show_workers, show_centroids, show_ref_points, worker_marker_size, cluster_marker_size, ref_marker_size, csv_filepath, img_filepath, img_height, x_bounds, y_bounds, plot_title, bigger_window_size)
result_centroids = result_clusters[['centroid_x', 'centroid_y']].to_numpy()  # DataFrame.as_matrix was removed in pandas 1.0
np.savetxt(img_name + '_coords.csv', result_centroids, delimiter=",", comments='', header = "centroid_x,centroid_y")
###Output
_____no_output_____ |
tutorials/Others/tutorial_multistock_variant_2.ipynb | ###Markdown
Deep Reinforcement Learning for Stock Trading from Scratch: Multiple Stock TradingTutorials to use OpenAI DRL to trade multiple stocks in one Jupyter Notebook | Presented at NeurIPS 2020: Deep RL Workshop* This blog is based on our paper: FinRL: A Deep Reinforcement Learning Library for Automated Stock Trading in Quantitative Finance, presented at NeurIPS 2020: Deep RL Workshop.* Check out the Medium blog for detailed explanations: https://towardsdatascience.com/finrl-for-quantitative-finance-tutorial-for-multiple-stock-trading-7b00763b7530* Please report any issues to our Github: https://github.com/AI4Finance-LLC/FinRL-Library/issues* **Pytorch Version** Content * [1. Problem Definition](0)* [2. Getting Started - Load Python packages](1) * [2.1. Install Packages](1.1) * [2.2. Check Additional Packages](1.2) * [2.3. Import Packages](1.3) * [2.4. Create Folders](1.4)* [3. Download Data](2)* [4. Preprocess Data](3) * [4.1. Technical Indicators](3.1) * [4.2. Perform Feature Engineering](3.2)* [5. Build Environment](4) * [5.1. Training & Trade Data Split](4.1) * [5.2. User-defined Environment](4.2) * [5.3. Initialize Environment](4.3) * [6. Implement DRL Algorithms](5) * [7. Backtesting Performance](6) * [7.1. BackTestStats](6.1) * [7.2. BackTestPlot](6.2) * [7.3. Baseline Stats](6.3) * [7.4. Compare to Stock Market Index](6.4) Part 1. Problem Definition This problem is to design an automated trading solution for multiple stock trading. We model the stock trading process as a Markov Decision Process (MDP). We then formulate our trading goal as a maximization problem. The algorithm is trained using Deep Reinforcement Learning (DRL) algorithms and the components of the reinforcement learning environment are:* Action: The action space describes the allowed actions with which the agent interacts with the environment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 represent selling, holding, and buying one stock. Also, an action can be carried out upon multiple shares. We use an action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or −10, respectively.* Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action. The reward is the change of the portfolio value when action a is taken at state s, arriving at the new state s′, i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfolio values at states s′ and s, respectively.* State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, so our trading agent observes many different features to better learn in an interactive environment.* Environment: Dow 30 constituents. The data of the stocks that we will be using for this case study is obtained from the Yahoo Finance API. The data contains Open-High-Low-Close prices and volume. Part 2. Getting Started- ASSUMES USING DOCKER, see readme for instructions 2.1. Add FinRL to your path. You can of course install it as a PyPI package, but this is for development purposes.
###Code
import sys
sys.path.append("..")
import pandas as pd
print(pd.__version__)
###Output
1.1.5
###Markdown
2.2. Check if the additional packages needed are present, if not install them. * Yahoo Finance API* pandas* numpy* matplotlib* stockstats* OpenAI gym* stable-baselines* tensorflow* pyfolio 2.3. Import Packages
###Code
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
import datetime
%matplotlib inline
from finrl import config
from finrl.neo_finrl.preprocessor.yahoodownloader import YahooDownloader
from finrl.neo_finrl.preprocessor.preprocessors import FeatureEngineer, data_split
from finrl.neo_finrl.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.drl_agents.stablebaselines3.models import DRLAgent
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline
from pprint import pprint
###Output
_____no_output_____
###Markdown
2.4. Create Folders
###Code
import os
if not os.path.exists("./" + config.DATA_SAVE_DIR):
os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
if not os.path.exists("./" + config.RESULTS_DIR):
os.makedirs("./" + config.RESULTS_DIR)
###Output
_____no_output_____
###Markdown
Part 3. Download DataYahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free.* FinRL uses a class **YahooDownloader** to fetch data from Yahoo Finance API* Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day). -----class YahooDownloader: Provides methods for retrieving daily stock data from Yahoo Finance API Attributes ---------- start_date : str start date of the data (modified from config.py) end_date : str end date of the data (modified from config.py) ticker_list : list a list of stock tickers (modified from config.py) Methods ------- fetch_data() Fetches data from yahoo API
###Code
# from config.py start_date is a string
config.START_DATE
# from config.py end_date is a string
config.END_DATE
print(config.DOW_30_TICKER)
df = YahooDownloader(start_date = '2009-01-01',
end_date = '2021-01-01',
ticker_list = config.DOW_30_TICKER).fetch_data()
df.shape
df.sort_values(['date','tic'],ignore_index=True).head()
###Output
_____no_output_____
###Markdown
Part 4: Preprocess DataData preprocessing is a crucial step for training a high-quality machine learning model. We need to check for missing data and do feature engineering in order to convert the data into a model-ready state.* Add technical indicators. In practical trading, various information needs to be taken into account, for example the historical stock prices, current holding shares, technical indicators, etc. In this article, we demonstrate two trend-following technical indicators: MACD and RSI.* Add turbulence index. Risk-aversion reflects whether an investor will choose to preserve the capital. It also influences one's trading strategy when facing different market volatility levels. To control the risk in a worst-case scenario, such as the financial crisis of 2007–2008, FinRL employs the financial turbulence index that measures extreme asset price fluctuation.
###Code
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list = config.TECHNICAL_INDICATORS_LIST,
use_turbulence=True,
user_defined_feature = False)
processed = fe.preprocess_data(df)
processed.sort_values(['date','tic'],ignore_index=True).head(10)
###Output
_____no_output_____
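###Markdown
As a sanity check on the indicators, the MACD line can be reproduced with plain pandas EWMs for a single ticker. The sketch below assumes the standard 12/26-period definition; the notebook itself relies on `FeatureEngineer`, which delegates to stockstats:
```python
# MACD line: EMA(12) - EMA(26) of the close price
aapl = df[df['tic'] == 'AAPL'].sort_values('date')
ema_fast = aapl['close'].ewm(span=12, adjust=False).mean()
ema_slow = aapl['close'].ewm(span=26, adjust=False).mean()
macd_line = ema_fast - ema_slow
print(macd_line.tail())
```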
###Markdown
Part 5. Design EnvironmentConsidering the stochastic and interactive nature of the automated stock trading tasks, a financial task is modeled as a **Markov Decision Process (MDP)** problem. The training process involves observing stock price change, taking an action and reward calculation to have the agent adjust its strategy accordingly. By interacting with the environment, the trading agent will derive a trading strategy with the maximized rewards as time proceeds.Our trading environments, based on OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation.The action space describes the allowed actions that the agent interacts with the environment. Normally, action a includes three actions: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. Also, an action can be carried upon multiple shares. We use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric. Training data split: 2009-01-01 to 2018-12-31 Trade data split: 2019-01-01 to 2020-12-31
###Code
train = data_split(processed, '2009-01-01','2019-01-01')
trade = data_split(processed, '2019-01-01','2021-01-01')
print(len(train))
print(len(trade))
import time
milliseconds = int(round(time.time() * 1000))
print(milliseconds)
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
from stable_baselines3.common.vec_env import DummyVecEnv
#from stable_baselines3.common import logger
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self,
df,
stock_dim,
hmax,
initial_amount,
buy_cost_pct,
sell_cost_pct,
reward_scaling,
state_space,
action_space,
tech_indicator_list,
turbulence_threshold=None,
risk_indicator_col='turbulence',
make_plots = False,
print_verbosity = 10,
day = 0,
initial=True,
previous_state=[],
model_name = '',
mode='',
iteration=''):
self.day = day
self.df = df
self.stock_dim = stock_dim
self.hmax = hmax
self.initial_amount = initial_amount
self.buy_cost_pct = buy_cost_pct
self.sell_cost_pct = sell_cost_pct
self.reward_scaling = reward_scaling
self.state_space = state_space
self.action_space = action_space
self.tech_indicator_list = tech_indicator_list
        self.action_space = spaces.Box(low = -1, high = 1,shape = (self.action_space,))  # replace the integer action count with a continuous Box of that dimension
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (self.state_space,))
self.data = self.df.loc[self.day,:]
self.terminal = False
self.make_plots = make_plots
self.print_verbosity = print_verbosity
self.turbulence_threshold = turbulence_threshold
self.risk_indicator_col = risk_indicator_col
self.initial = initial
self.previous_state = previous_state
self.model_name=model_name
self.mode=mode
self.iteration=iteration
        # initialize state
self.state = self._initiate_state()
# initialize reward
self.reward = 0
self.turbulence = 0
self.cost = 0
self.trades = 0
self.episode = 0
# memorize all the total balance change
self.asset_memory = [self.initial_amount]
self.rewards_memory = []
self.actions_memory=[]
self.date_memory=[self._get_date()]
#self.reset()
self._seed()
def _sell_stock(self, index, action):
def _do_sell_normal():
if self.state[index+1]>0:
# Sell only if the price is > 0 (no missing data in this particular date)
# perform sell action based on the sign of the action
if self.state[index+self.stock_dim+1] > 0:
# Sell only if current asset is > 0
sell_num_shares = min(abs(action),self.state[index+self.stock_dim+1])
sell_amount = self.state[index+1] * sell_num_shares * (1- self.sell_cost_pct)
#update balance
self.state[0] += sell_amount
self.state[index+self.stock_dim+1] -= sell_num_shares
self.cost +=self.state[index+1] * sell_num_shares * self.sell_cost_pct
self.trades+=1
else:
sell_num_shares = 0
else:
sell_num_shares = 0
return sell_num_shares
# perform sell action based on the sign of the action
if self.turbulence_threshold is not None:
if self.turbulence>=self.turbulence_threshold:
if self.state[index+1]>0:
# Sell only if the price is > 0 (no missing data in this particular date)
# if turbulence goes over threshold, just clear out all positions
if self.state[index+self.stock_dim+1] > 0:
# Sell only if current asset is > 0
sell_num_shares = self.state[index+self.stock_dim+1]
sell_amount = self.state[index+1]*sell_num_shares* (1- self.sell_cost_pct)
#update balance
self.state[0] += sell_amount
self.state[index+self.stock_dim+1] =0
self.cost += self.state[index+1]*sell_num_shares* \
self.sell_cost_pct
self.trades+=1
else:
sell_num_shares = 0
else:
sell_num_shares = 0
else:
sell_num_shares = _do_sell_normal()
else:
sell_num_shares = _do_sell_normal()
return sell_num_shares
def _buy_stock(self, index, action):
def _do_buy():
if self.state[index+1]>0:
#Buy only if the price is > 0 (no missing data in this particular date)
available_amount = self.state[0] // self.state[index+1]
# print('available_amount:{}'.format(available_amount))
#update balance
buy_num_shares = min(available_amount, action)
buy_amount = self.state[index+1] * buy_num_shares * (1+ self.buy_cost_pct)
self.state[0] -= buy_amount
self.state[index+self.stock_dim+1] += buy_num_shares
self.cost+=self.state[index+1] * buy_num_shares * self.buy_cost_pct
self.trades+=1
else:
buy_num_shares = 0
return buy_num_shares
# perform buy action based on the sign of the action
if self.turbulence_threshold is None:
buy_num_shares = _do_buy()
else:
if self.turbulence< self.turbulence_threshold:
buy_num_shares = _do_buy()
else:
buy_num_shares = 0
pass
return buy_num_shares
def _make_plot(self):
plt.plot(self.asset_memory,'r')
plt.savefig('results/account_value_trade_{}.png'.format(self.episode))
plt.close()
def step(self, actions):
self.terminal = self.day >= len(self.df.index.unique())-1
if self.terminal:
# print(f"Episode: {self.episode}")
if self.make_plots:
self._make_plot()
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
df_total_value = pd.DataFrame(self.asset_memory)
tot_reward = self.state[0]+sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))- self.initial_amount
df_total_value.columns = ['account_value']
df_total_value['date'] = self.date_memory
df_total_value['daily_return']=df_total_value['account_value'].pct_change(1)
if df_total_value['daily_return'].std() !=0:
sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
df_total_value['daily_return'].std()
df_rewards = pd.DataFrame(self.rewards_memory)
df_rewards.columns = ['account_rewards']
df_rewards['date'] = self.date_memory[:-1]
if self.episode % self.print_verbosity == 0:
print(f"day: {self.day}, episode: {self.episode}")
print(f"begin_total_asset: {self.asset_memory[0]:0.2f}")
print(f"end_total_asset: {end_total_asset:0.2f}")
print(f"total_reward: {tot_reward:0.2f}")
print(f"total_cost: {self.cost:0.2f}")
print(f"total_trades: {self.trades}")
if df_total_value['daily_return'].std() != 0:
print(f"Sharpe: {sharpe:0.3f}")
print("=================================")
if (self.model_name!='') and (self.mode!=''):
df_actions = self.save_action_memory()
df_actions.to_csv('results/actions_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration))
df_total_value.to_csv('results/account_value_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)
df_rewards.to_csv('results/account_rewards_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)
plt.plot(self.asset_memory,'r')
                plt.savefig('results/account_value_{}_{}_{}.png'.format(self.mode,self.model_name, self.iteration))  # plt.savefig takes no index kwarg
plt.close()
# Add outputs to logger interface
#logger.record("environment/portfolio_value", end_total_asset)
#logger.record("environment/total_reward", tot_reward)
#logger.record("environment/total_reward_pct", (tot_reward / (end_total_asset - tot_reward)) * 100)
#logger.record("environment/total_cost", self.cost)
#logger.record("environment/total_trades", self.trades)
return self.state, self.reward, self.terminal, {}
else:
            actions = actions * self.hmax  # policy actions are scaled in [-1, 1], so multiply by hmax
            actions = (actions.astype(int))  # convert to integers because we can't trade fractions of shares
if self.turbulence_threshold is not None:
if self.turbulence>=self.turbulence_threshold:
actions=np.array([-self.hmax]*self.stock_dim)
begin_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
#print("begin_total_asset:{}".format(begin_total_asset))
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]
for index in sell_index:
# print(f"Num shares before: {self.state[index+self.stock_dim+1]}")
# print(f'take sell action before : {actions[index]}')
actions[index] = self._sell_stock(index, actions[index]) * (-1)
# print(f'take sell action after : {actions[index]}')
# print(f"Num shares after: {self.state[index+self.stock_dim+1]}")
for index in buy_index:
# print('take buy action: {}'.format(actions[index]))
actions[index] = self._buy_stock(index, actions[index])
self.actions_memory.append(actions)
#state: s -> s+1
self.day += 1
self.data = self.df.loc[self.day,:]
if self.turbulence_threshold is not None:
self.turbulence = self.data[self.risk_indicator_col].values[0]
self.state = self._update_state()
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
self.asset_memory.append(end_total_asset)
self.date_memory.append(self._get_date())
self.reward = end_total_asset - begin_total_asset
self.rewards_memory.append(self.reward)
self.reward = self.reward*self.reward_scaling
return self.state, self.reward, self.terminal, {}
def reset(self):
#initiate state
self.state = self._initiate_state()
if self.initial:
self.asset_memory = [self.initial_amount]
else:
previous_total_asset = self.previous_state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)]))
self.asset_memory = [previous_total_asset]
self.day = 0
self.data = self.df.loc[self.day,:]
self.turbulence = 0
self.cost = 0
self.trades = 0
self.terminal = False
# self.iteration=self.iteration
self.rewards_memory = []
self.actions_memory=[]
self.date_memory=[self._get_date()]
self.episode+=1
return self.state
def render(self, mode='human',close=False):
return self.state
def _initiate_state(self):
if self.initial:
# For Initial State
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.initial_amount] + \
self.data.close.values.tolist() + \
[0]*self.stock_dim + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.initial_amount] + \
[self.data.close] + \
[0]*self.stock_dim + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
else:
#Using Previous State
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.previous_state[0]] + \
self.data.close.values.tolist() + \
self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.previous_state[0]] + \
[self.data.close] + \
self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
return state
def _update_state(self):
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.state[0]] + \
self.data.close.values.tolist() + \
list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.state[0]] + \
[self.data.close] + \
list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
return state
def _get_date(self):
if len(self.df.tic.unique())>1:
date = self.data.date.unique()[0]
else:
date = self.data.date
return date
def save_asset_memory(self):
date_list = self.date_memory
asset_list = self.asset_memory
#print(len(date_list))
#print(len(asset_list))
df_account_value = pd.DataFrame({'date':date_list,'account_value':asset_list})
return df_account_value
def save_action_memory(self):
if len(self.df.tic.unique())>1:
# date and close price length must match actions length
date_list = self.date_memory[:-1]
df_date = pd.DataFrame(date_list)
df_date.columns = ['date']
action_list = self.actions_memory
df_actions = pd.DataFrame(action_list)
df_actions.columns = self.data.tic.values
df_actions.index = df_date.date
#df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
else:
date_list = self.date_memory[:-1]
action_list = self.actions_memory
df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
return df_actions
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_sb_env(self):
e = DummyVecEnv([lambda: self])
obs = e.reset()
return e, obs
print(StockTradingEnv.__doc__)
###Output
A stock trading environment for OpenAI gym
###Markdown
state spaceThe state space of the observation is as follows: `start_cash, <owned_shares_j, <indicator_i_j for i in daily_information_cols> for j in assets>`. Indicators are any daily measurement you can achieve. Common ones are 'volume', 'open', 'close', 'high', 'low'. However, you can add these as needed; the feature engineer adds indicators, and you can add your own as well.
###Code
information_cols = ['open', 'high', 'low', 'close', 'volume', 'day', 'macd', 'rsi_30', 'cci_30', 'dx_30', 'turbulence']
env_kwargs = {
"hmax": 5000,
"daily_reward": 5,
"out_of_cash_penalty": 0,
"cash_penalty_proportion": 0.1,
"daily_information_cols": information_cols,
"print_verbosity": 500,
}
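# NOTE (assumption): the kwargs above match FinRL's cash-penalty "V2" environment
# referenced in the commented-out lines below; the StockTradingEnv class defined
# earlier expects stock_dim/initial_amount/state_space/... instead and would
# reject these keyword arguments.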
e_train_gym = StockTradingEnv(df = train, cache_indicator_data=True,
**env_kwargs)
# e_train_obs = StockTradingEnvV2(df = train, cache_indicator_data=False, **env_kwargs)
# e_trade_gym = StockTradingEnvV2(df = train,**env_kwargs)
# env_trade, obs_trade = e_trade_gym.get_sb_env()
###Output
_____no_output_____
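###Markdown
A tiny arithmetic check of the observation size implied by the state layout described above; the exact layout is an assumption based on the V2-style description (cash, then per-asset holdings, then the daily information columns per asset):
```python
# [cash] + [shares per asset] + [k information columns per asset]
n_assets = 30                      # Dow 30 constituents
n_cols = len(information_cols)     # 11 daily information columns
state_dim = 1 + n_assets + n_assets * n_cols
print(state_dim)                   # 361 under these assumptions
```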
###Markdown
Environment for TrainingThere are two available environments: the multiprocessing env and the single-processing env. Some models won't work with multiprocessing. ```python
# single processing
env_train, _ = e_train_gym.get_sb_env()
# multiprocessing
env_train, _ = e_train_gym.get_multiproc_env(n = )
```
###Code
# for this example, let's do multiprocessing with n_cores-2
import multiprocessing
n_cores = multiprocessing.cpu_count() - 2
n_cores = 24  # override the computed value and hard-code the worker count for this run
print(f"using {n_cores} cores")
#
e_train_gym.print_verbosity = 500
env_train, _ = e_train_gym.get_multiproc_env(n = n_cores)
# env_train, _ = e_train_gym.get_sb_env()
env_train_obs, _ = e_train_gym.get_sb_env()
###Output
using 24 cores
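###Markdown
For reference, here is a sketch of the two wrappers involved, assuming `get_multiproc_env` builds a `SubprocVecEnv` under the hood (stable-baselines3 ships both; real code would construct a fresh environment per worker rather than sharing one object):
```python
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv

make_env = lambda: e_train_gym
single_env = DummyVecEnv([make_env])             # everything runs in one process
multi_env = SubprocVecEnv([make_env] * n_cores)  # one worker process per copy
```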
###Markdown
Part 6: Implement DRL Algorithms* The implementation of the DRL algorithms is based on **OpenAI Baselines** and **Stable Baselines**. Stable Baselines is a fork of OpenAI Baselines, with a major structural refactoring and code cleanups.* The FinRL library includes fine-tuned standard DRL algorithms, such as DQN, DDPG, Multi-Agent DDPG, PPO, SAC, A2C and TD3. We also allow users to design their own DRL algorithms by adapting these DRL algorithms.
###Code
agent = DRLAgent(env = env_train)
print(config.PPO_PARAMS)
###Output
{'n_steps': 2048, 'ent_coef': 0.01, 'learning_rate': 0.00025, 'batch_size': 64}
###Markdown
Model Training: 5 models, A2C, DDPG, PPO, TD3, SAC Model 1: PPO
###Code
from torch.nn import Softsign, ReLU
ppo_params ={'n_steps': 256,
'ent_coef': 0.01,
'learning_rate': 0.00001,
'batch_size': 256,
'gamma': 0.99}
policy_kwargs = {
# "activation_fn": ReLU,
"net_arch": [1024, 1024, 1024, 1024],
# "squash_output": True
}
model = agent.get_model("ppo", model_kwargs = ppo_params, policy_kwargs = policy_kwargs, verbose = 0)
model.learn(total_timesteps = 10000000,
eval_env = env_train_obs,
eval_freq = 1000,
log_interval = 1,
tb_log_name = 'cashbuffer_1_16_longrun',
n_eval_episodes = 1,
reset_num_timesteps = True)
model.save("quicksave_ppo_dow_1_17.model")
data_turbulence = processed[(processed.date<'2019-01-01') & (processed.date>='2009-01-01')]
insample_turbulence = data_turbulence.drop_duplicates(subset=['date'])
insample_turbulence.turbulence.describe()
turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, 1)  # quantile 1.0 is the in-sample maximum, so the threshold is effectively never exceeded
turbulence_threshold
###Output
_____no_output_____
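###Markdown
To resume training or run inference from the checkpoint saved above, stable-baselines3 can reload it directly. A short sketch (the path matches the `model.save` call in the previous cell):
```python
from stable_baselines3 import PPO

loaded_model = PPO.load("quicksave_ppo_dow_1_17.model", env=env_train_obs)
```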
###Markdown
TradeThe DRL model needs to be updated periodically in order to take full advantage of the data; ideally we retrain it yearly, quarterly, or monthly. We also need to tune the parameters along the way; in this notebook I only use the in-sample data from 2009-01 to 2018-12 to tune the parameters once, so there is some alpha decay as the length of the trade window extends. Numerous hyperparameters – e.g. the learning rate, the total number of samples to train on – influence the learning process and are usually determined by testing some variations.
###Code
trade = data_split(processed, '2019-01-01','2021-01-01')
env_kwargs = {
"hmax": 5000,
"daily_reward": 5,
"out_of_cash_penalty": 0,
"cash_penalty_proportion": 0.1,
"daily_information_cols": information_cols,
"print_verbosity": 50,
"random_start": False,
"cache_indicator_data": False
}
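# NOTE (assumption): StockTradingEnvV2 is not imported anywhere in this notebook;
# it presumably refers to FinRL's cash-penalty trading environment, which is the
# class the kwargs above (daily_reward, out_of_cash_penalty, ...) belong to.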
e_trade_gym = StockTradingEnvV2(df = trade,**env_kwargs)
env_trade, obs_trade = e_trade_gym.get_sb_env()
print(len(e_trade_gym.dates))
df_account_value, df_actions = DRLAgent.DRL_prediction(model=model,
environment=e_trade_gym)
df_account_value.shape
df_account_value.head(50)
###Output
_____no_output_____
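###Markdown
A quick consistency check on the prediction output. This sketch assumes the account-value column is named `total_assets`, as the backtest calls below assume:
```python
daily_ret = df_account_value['total_assets'].pct_change().dropna()
cum_return = (1 + daily_ret).prod() - 1
print(f"cumulative return over the trade window: {cum_return:.2%}")
```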
###Markdown
Part 7: Backtest Our StrategyBacktesting plays a key role in evaluating the performance of a trading strategy. An automated backtesting tool is preferred because it reduces human error. We usually use the Quantopian pyfolio package to backtest our trading strategies. It is easy to use and consists of various individual plots that provide a comprehensive image of the performance of a trading strategy. 7.1 BackTestStatsPass in df_account_value; this information is stored in the env class.
###Code
print("==============Get Backtest Results===========")
now = datetime.datetime.now().strftime('%Y%m%d-%Hh%M')
perf_stats_all = backtest_stats(account_value=df_account_value, value_col_name = 'total_assets')
perf_stats_all = pd.DataFrame(perf_stats_all)
perf_stats_all.to_csv("./"+config.RESULTS_DIR+"/perf_stats_all_"+now+'.csv')
###Output
==============Get Backtest Results===========
annual return: -0.31429838045686775
sharpe ratio: -0.03215222673502538
Annual return -0.007905
Cumulative returns -0.015717
Annual volatility 0.097907
Sharpe ratio -0.032152
Calmar ratio -0.050407
Stability 0.353511
Max drawdown -0.156831
Omega ratio 0.993158
Sortino ratio -0.044483
Skew -0.219349
Kurtosis 9.495171
Tail ratio 0.946898
Daily value at risk -0.012348
Alpha 0.000000
Beta 1.000000
dtype: float64
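###Markdown
The headline Sharpe ratio above can be reproduced by hand from daily returns. This sketch uses the same square-root-of-252 annualization the environment prints during training:
```python
daily_ret = df_account_value['total_assets'].pct_change().dropna()
sharpe = (252 ** 0.5) * daily_ret.mean() / daily_ret.std()
print(f"Sharpe: {sharpe:0.3f}")
```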
###Markdown
7.2 BackTestPlot
###Code
print("==============Compare to DJIA===========")
%matplotlib inline
# S&P 500: ^GSPC
# Dow Jones Index: ^DJI
# NASDAQ 100: ^NDX
backtest_plot(df_account_value,
baseline_ticker = '^DJI',
baseline_start = '2019-01-01',
baseline_end = '2021-01-01', value_col_name = 'total_assets')
###Output
==============Compare to DJIA===========
annual return: -0.31429838045686775
sharpe ratio: -0.03215222673502538
[*********************100%***********************] 1 of 1 completed
Shape of DataFrame: (505, 8)
###Markdown
7.3 Baseline Stats
###Code
print("==============Get Baseline Stats===========")
baseline_df = get_baseline(
ticker="^DJI",
start = '2019-01-01',
end = '2021-01-01')
baseline_stats = backtest_stats(baseline_df, value_col_name = 'close')
###Output
==============Get Baseline Stats===========
[*********************100%***********************] 1 of 1 completed
Shape of DataFrame: (505, 8)
Annual return 0.144674
Cumulative returns 0.310981
Annual volatility 0.274619
Sharpe ratio 0.631418
Calmar ratio 0.390102
Stability 0.116677
Max drawdown -0.370862
Omega ratio 1.149365
Sortino ratio 0.870084
Skew NaN
Kurtosis NaN
Tail ratio 0.860710
Daily value at risk -0.033911
Alpha 0.000000
Beta 1.000000
dtype: float64
Part 5. Design EnvironmentConsidering the stochastic and interactive nature of automated stock trading tasks, a financial task is modeled as a **Markov Decision Process (MDP)** problem. The training process involves observing stock price changes, taking an action, and calculating the reward, so that the agent adjusts its strategy accordingly. By interacting with the environment, the trading agent derives a trading strategy that maximizes rewards as time proceeds.Our trading environments, based on the OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation.The action space describes the allowed actions that the agent uses to interact with the environment. Normally, action a includes three values: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. Also, an action can be carried out on multiple shares. We use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are represented as 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric. Training data split: 2009-01-01 to 2018-12-31 Trade data split: 2019-01-01 to 2020-12-31
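A small sketch of how a normalized policy output maps back to share counts (mirroring the rescale-and-truncate step in the environment's `step` method below; the numbers are illustrative):
```python
import numpy as np

hmax = 100                                    # max shares per trade (illustrative)
raw_actions = np.array([0.73, -0.25, 0.004])  # policy output in [-1, 1], one entry per stock
shares = (raw_actions * hmax).astype(int)     # rescale, then truncate to whole shares
print(shares)                                 # -> [ 73 -25   0]: buy 73, sell 25, hold
```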
###Code
train = data_split(processed, '2009-01-01','2019-01-01')
trade = data_split(processed, '2019-01-01','2021-01-01')
print(len(train))
print(len(trade))
import time
milliseconds = int(round(time.time() * 1000))
print(milliseconds)
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
from stable_baselines3.common.vec_env import DummyVecEnv
#from stable_baselines3.common import logger
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self,
df,
stock_dim,
hmax,
initial_amount,
buy_cost_pct,
sell_cost_pct,
reward_scaling,
state_space,
action_space,
tech_indicator_list,
turbulence_threshold=None,
risk_indicator_col='turbulence',
make_plots = False,
print_verbosity = 10,
day = 0,
initial=True,
previous_state=[],
model_name = '',
mode='',
iteration=''):
self.day = day
self.df = df
self.stock_dim = stock_dim
self.hmax = hmax
self.initial_amount = initial_amount
self.buy_cost_pct = buy_cost_pct
self.sell_cost_pct = sell_cost_pct
self.reward_scaling = reward_scaling
self.state_space = state_space
self.action_space = action_space
self.tech_indicator_list = tech_indicator_list
self.action_space = spaces.Box(low = -1, high = 1,shape = (self.action_space,))
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (self.state_space,))
self.data = self.df.loc[self.day,:]
self.terminal = False
self.make_plots = make_plots
self.print_verbosity = print_verbosity
self.turbulence_threshold = turbulence_threshold
self.risk_indicator_col = risk_indicator_col
self.initial = initial
self.previous_state = previous_state
self.model_name=model_name
self.mode=mode
self.iteration=iteration
        # initialize state
self.state = self._initiate_state()
# initialize reward
self.reward = 0
self.turbulence = 0
self.cost = 0
self.trades = 0
self.episode = 0
# memorize all the total balance change
self.asset_memory = [self.initial_amount]
self.rewards_memory = []
self.actions_memory=[]
self.date_memory=[self._get_date()]
#self.reset()
self._seed()
def _sell_stock(self, index, action):
def _do_sell_normal():
if self.state[index+1]>0:
# Sell only if the price is > 0 (no missing data in this particular date)
# perform sell action based on the sign of the action
if self.state[index+self.stock_dim+1] > 0:
# Sell only if current asset is > 0
sell_num_shares = min(abs(action),self.state[index+self.stock_dim+1])
sell_amount = self.state[index+1] * sell_num_shares * (1- self.sell_cost_pct)
#update balance
self.state[0] += sell_amount
self.state[index+self.stock_dim+1] -= sell_num_shares
self.cost +=self.state[index+1] * sell_num_shares * self.sell_cost_pct
self.trades+=1
else:
sell_num_shares = 0
else:
sell_num_shares = 0
return sell_num_shares
# perform sell action based on the sign of the action
if self.turbulence_threshold is not None:
if self.turbulence>=self.turbulence_threshold:
if self.state[index+1]>0:
# Sell only if the price is > 0 (no missing data in this particular date)
# if turbulence goes over threshold, just clear out all positions
if self.state[index+self.stock_dim+1] > 0:
# Sell only if current asset is > 0
sell_num_shares = self.state[index+self.stock_dim+1]
sell_amount = self.state[index+1]*sell_num_shares* (1- self.sell_cost_pct)
#update balance
self.state[0] += sell_amount
self.state[index+self.stock_dim+1] =0
self.cost += self.state[index+1]*sell_num_shares* \
self.sell_cost_pct
self.trades+=1
else:
sell_num_shares = 0
else:
sell_num_shares = 0
else:
sell_num_shares = _do_sell_normal()
else:
sell_num_shares = _do_sell_normal()
return sell_num_shares
def _buy_stock(self, index, action):
def _do_buy():
if self.state[index+1]>0:
#Buy only if the price is > 0 (no missing data in this particular date)
available_amount = self.state[0] // self.state[index+1]
# print('available_amount:{}'.format(available_amount))
#update balance
buy_num_shares = min(available_amount, action)
buy_amount = self.state[index+1] * buy_num_shares * (1+ self.buy_cost_pct)
self.state[0] -= buy_amount
self.state[index+self.stock_dim+1] += buy_num_shares
self.cost+=self.state[index+1] * buy_num_shares * self.buy_cost_pct
self.trades+=1
else:
buy_num_shares = 0
return buy_num_shares
# perform buy action based on the sign of the action
if self.turbulence_threshold is None:
buy_num_shares = _do_buy()
else:
if self.turbulence< self.turbulence_threshold:
buy_num_shares = _do_buy()
else:
buy_num_shares = 0
pass
return buy_num_shares
def _make_plot(self):
plt.plot(self.asset_memory,'r')
plt.savefig('results/account_value_trade_{}.png'.format(self.episode))
plt.close()
def step(self, actions):
self.terminal = self.day >= len(self.df.index.unique())-1
if self.terminal:
# print(f"Episode: {self.episode}")
if self.make_plots:
self._make_plot()
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
df_total_value = pd.DataFrame(self.asset_memory)
tot_reward = self.state[0]+sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))- self.initial_amount
df_total_value.columns = ['account_value']
df_total_value['date'] = self.date_memory
df_total_value['daily_return']=df_total_value['account_value'].pct_change(1)
if df_total_value['daily_return'].std() !=0:
sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
df_total_value['daily_return'].std()
df_rewards = pd.DataFrame(self.rewards_memory)
df_rewards.columns = ['account_rewards']
df_rewards['date'] = self.date_memory[:-1]
if self.episode % self.print_verbosity == 0:
print(f"day: {self.day}, episode: {self.episode}")
print(f"begin_total_asset: {self.asset_memory[0]:0.2f}")
print(f"end_total_asset: {end_total_asset:0.2f}")
print(f"total_reward: {tot_reward:0.2f}")
print(f"total_cost: {self.cost:0.2f}")
print(f"total_trades: {self.trades}")
if df_total_value['daily_return'].std() != 0:
print(f"Sharpe: {sharpe:0.3f}")
print("=================================")
if (self.model_name!='') and (self.mode!=''):
df_actions = self.save_action_memory()
df_actions.to_csv('results/actions_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration))
df_total_value.to_csv('results/account_value_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)
df_rewards.to_csv('results/account_rewards_{}_{}_{}.csv'.format(self.mode,self.model_name, self.iteration),index=False)
plt.plot(self.asset_memory,'r')
                plt.savefig('results/account_value_{}_{}_{}.png'.format(self.mode,self.model_name, self.iteration))
plt.close()
# Add outputs to logger interface
#logger.record("environment/portfolio_value", end_total_asset)
#logger.record("environment/total_reward", tot_reward)
#logger.record("environment/total_reward_pct", (tot_reward / (end_total_asset - tot_reward)) * 100)
#logger.record("environment/total_cost", self.cost)
#logger.record("environment/total_trades", self.trades)
return self.state, self.reward, self.terminal, {}
else:
            actions = actions * self.hmax  # actions arrive scaled to [-1, 1] from the policy
            actions = (actions.astype(int))  # convert to integers; we can't trade fractional shares
if self.turbulence_threshold is not None:
if self.turbulence>=self.turbulence_threshold:
actions=np.array([-self.hmax]*self.stock_dim)
begin_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
#print("begin_total_asset:{}".format(begin_total_asset))
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]
for index in sell_index:
# print(f"Num shares before: {self.state[index+self.stock_dim+1]}")
# print(f'take sell action before : {actions[index]}')
actions[index] = self._sell_stock(index, actions[index]) * (-1)
# print(f'take sell action after : {actions[index]}')
# print(f"Num shares after: {self.state[index+self.stock_dim+1]}")
for index in buy_index:
# print('take buy action: {}'.format(actions[index]))
actions[index] = self._buy_stock(index, actions[index])
self.actions_memory.append(actions)
#state: s -> s+1
self.day += 1
self.data = self.df.loc[self.day,:]
if self.turbulence_threshold is not None:
self.turbulence = self.data[self.risk_indicator_col].values[0]
self.state = self._update_state()
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]))
self.asset_memory.append(end_total_asset)
self.date_memory.append(self._get_date())
self.reward = end_total_asset - begin_total_asset
self.rewards_memory.append(self.reward)
self.reward = self.reward*self.reward_scaling
return self.state, self.reward, self.terminal, {}
def reset(self):
#initiate state
self.state = self._initiate_state()
if self.initial:
self.asset_memory = [self.initial_amount]
else:
previous_total_asset = self.previous_state[0]+ \
sum(np.array(self.state[1:(self.stock_dim+1)])*np.array(self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)]))
self.asset_memory = [previous_total_asset]
self.day = 0
self.data = self.df.loc[self.day,:]
self.turbulence = 0
self.cost = 0
self.trades = 0
self.terminal = False
# self.iteration=self.iteration
self.rewards_memory = []
self.actions_memory=[]
self.date_memory=[self._get_date()]
self.episode+=1
return self.state
def render(self, mode='human',close=False):
return self.state
def _initiate_state(self):
if self.initial:
# For Initial State
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.initial_amount] + \
self.data.close.values.tolist() + \
[0]*self.stock_dim + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.initial_amount] + \
[self.data.close] + \
[0]*self.stock_dim + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
else:
#Using Previous State
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.previous_state[0]] + \
self.data.close.values.tolist() + \
self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.previous_state[0]] + \
[self.data.close] + \
self.previous_state[(self.stock_dim+1):(self.stock_dim*2+1)] + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
return state
def _update_state(self):
if len(self.df.tic.unique())>1:
# for multiple stock
state = [self.state[0]] + \
self.data.close.values.tolist() + \
list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \
sum([self.data[tech].values.tolist() for tech in self.tech_indicator_list ], [])
else:
# for single stock
state = [self.state[0]] + \
[self.data.close] + \
list(self.state[(self.stock_dim+1):(self.stock_dim*2+1)]) + \
sum([[self.data[tech]] for tech in self.tech_indicator_list ], [])
return state
def _get_date(self):
if len(self.df.tic.unique())>1:
date = self.data.date.unique()[0]
else:
date = self.data.date
return date
def save_asset_memory(self):
date_list = self.date_memory
asset_list = self.asset_memory
#print(len(date_list))
#print(len(asset_list))
df_account_value = pd.DataFrame({'date':date_list,'account_value':asset_list})
return df_account_value
def save_action_memory(self):
if len(self.df.tic.unique())>1:
# date and close price length must match actions length
date_list = self.date_memory[:-1]
df_date = pd.DataFrame(date_list)
df_date.columns = ['date']
action_list = self.actions_memory
df_actions = pd.DataFrame(action_list)
df_actions.columns = self.data.tic.values
df_actions.index = df_date.date
#df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
else:
date_list = self.date_memory[:-1]
action_list = self.actions_memory
df_actions = pd.DataFrame({'date':date_list,'actions':action_list})
return df_actions
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_sb_env(self):
e = DummyVecEnv([lambda: self])
obs = e.reset()
return e, obs
print(StockTradingEnv.__doc__)
###Output
A stock trading environment for OpenAI gym
###Markdown
state spaceThe state space of the observation is as follows: `<start_cash, <asset_j value>, <indicator_i for asset_j>>` for each asset j in the portfolio. Indicators are any daily measurements you can obtain; common ones are 'volume', 'open', 'close', 'high', and 'low'. You can add these as needed: the feature engineer adds indicators, and you can add your own as well.
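The state length then follows directly from this layout: one cash entry, plus one price and one holding per asset, plus one entry per indicator per asset. A quick sanity check with the numbers used in this notebook (30 Dow tickers, and assuming the 11 information columns defined in the next cell):
```python
stock_dim = 30     # Dow 30 universe
n_indicators = 11  # the information_cols defined in the next cell
state_size = 1 + 2 * stock_dim + n_indicators * stock_dim  # cash + prices + holdings + indicators
print(state_size)  # -> 391
```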
###Code
information_cols = ['open', 'high', 'low', 'close', 'volume', 'day', 'macd', 'rsi_30', 'cci_30', 'dx_30', 'turbulence']
env_kwargs = {
"hmax": 5000,
"daily_reward": 5,
"out_of_cash_penalty": 0,
"cash_penalty_proportion": 0.1,
"daily_information_cols": information_cols,
"print_verbosity": 500,
}
# NOTE: these kwargs match the StockTradingEnvV2 signature used for trading later;
# the StockTradingEnv class defined above takes a different set of constructor arguments.
e_train_gym = StockTradingEnv(df = train, cache_indicator_data=True,
                    **env_kwargs)
# e_train_obs = StockTradingEnvV2(df = train, cache_indicator_data=False, **env_kwargs)
# e_trade_gym = StockTradingEnvV2(df = train,**env_kwargs)
# env_trade, obs_trade = e_trade_gym.get_sb_env()
###Output
_____no_output_____
###Markdown
Environment for TrainingThere are two available environments: the multiprocessing env and the single-processing env. Some models won't work with multiprocessing. ```python
# single processing
env_train, _ = e_train_gym.get_sb_env()

# multiprocessing
env_train, _ = e_train_gym.get_multiproc_env(n = <n_cores>)
```
###Code
# for this example, let's do multiprocessing with n_cores-2
import multiprocessing
n_cores = multiprocessing.cpu_count() - 2
n_cores = 24  # override the computed value and hard-code 24 cores for this run
print(f"using {n_cores} cores")
#
e_train_gym.print_verbosity = 500
env_train, _ = e_train_gym.get_multiproc_env(n = n_cores)
# env_train, _ = e_train_gym.get_sb_env()
env_train_obs, _ = e_train_gym.get_sb_env()
###Output
using 24 cores
###Markdown
Deep Reinforcement Learning for Stock Trading from Scratch: Multiple Stock TradingTutorials to use OpenAI DRL to trade multiple stocks in one Jupyter Notebook | Presented at NeurIPS 2020: Deep RL Workshop* This blog is based on our paper: FinRL: A Deep Reinforcement Learning Library for Automated Stock Trading in Quantitative Finance, presented at NeurIPS 2020: Deep RL Workshop.* Check out the Medium blog for detailed explanations: https://towardsdatascience.com/finrl-for-quantitative-finance-tutorial-for-multiple-stock-trading-7b00763b7530* Please report any issues to our GitHub: https://github.com/AI4Finance-LLC/FinRL-Library/issues* **Pytorch Version** Content * [1. Problem Definition](0)* [2. Getting Started - Load Python packages](1) * [2.1. Install Packages](1.1) * [2.2. Check Additional Packages](1.2) * [2.3. Import Packages](1.3) * [2.4. Create Folders](1.4)* [3. Download Data](2)* [4. Preprocess Data](3) * [4.1. Technical Indicators](3.1) * [4.2. Perform Feature Engineering](3.2)* [5.Build Environment](4) * [5.1. Training & Trade Data Split](4.1) * [5.2. User-defined Environment](4.2) * [5.3. Initialize Environment](4.3) * [6.Implement DRL Algorithms](5) * [7.Backtesting Performance](6) * [7.1. BackTestStats](6.1) * [7.2. BackTestPlot](6.2) * [7.3. Baseline Stats](6.3) * [7.4. Compare to Stock Market Index](6.4) Part 1. Problem Definition This problem is to design an automated trading solution for multiple stock trading. We model the stock trading process as a Markov Decision Process (MDP). We then formulate our trading goal as a maximization problem.The algorithm is trained using Deep Reinforcement Learning (DRL) algorithms, and the components of the reinforcement learning environment are:* Action: The action space describes the allowed actions that the agent uses to interact with the environment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 represent selling, holding, and buying one stock. Also, an action can be carried out on multiple shares. We use an action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are represented as 10 or −10, respectively.* Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action: the change of the portfolio value when action a is taken at state s and arrives at the new state s′, i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfolio values at states s′ and s, respectively.* State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, so our trading agent observes many different features to better learn in an interactive environment.* Environment: Dow 30 constituents.The data that we will be using for this case study is obtained from the Yahoo Finance API. The data contains Open-High-Low-Close prices and volume. Part 2. Getting Started- ASSUMES USING DOCKER, see readme for instructions 2.1. Add FinRL to your path. You can of course install it as a PyPI package, but this is for development purposes.
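To make the reward definition from Part 1 concrete, here is a hedged sketch of r(s, a, s′) as the change in portfolio value (cash plus holdings marked to market); all numbers are made up:
```python
import numpy as np

def portfolio_value(cash, prices, holdings):
    """v = cash + sum(price_i * shares_i)"""
    return cash + float(np.dot(prices, holdings))

v = portfolio_value(10_000.0, np.array([150.0, 40.0]), np.array([10, 50]))      # state s
v_next = portfolio_value(9_000.0, np.array([155.0, 41.0]), np.array([15, 50]))  # state s'
reward = v_next - v  # r(s, a, s') = v' - v
print(reward)        # -> -125.0
```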
###Code
import sys
sys.path.append("..")
import pandas as pd
print(pd.__version__)
###Output
1.1.5
###Markdown
2.2. Check if the additional packages needed are present; if not, install them. * Yahoo Finance API* pandas* numpy* matplotlib* stockstats* OpenAI gym* stable-baselines* tensorflow* pyfolio 2.3. Import Packages
###Code
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
import datetime
%matplotlib inline
from finrl import config
from finrl.neo_finrl.preprocessor.yahoodownloader import YahooDownloader
from finrl.neo_finrl.preprocessor.preprocessors import FeatureEngineer, data_split
from finrl.neo_finrl.env_stock_trading.env_stocktrading import StockTradingEnv
from finrl.agents.stablebaselines3.models import DRLAgent
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline
from pprint import pprint
###Output
_____no_output_____
###Markdown
2.4. Create Folders
###Code
import os
if not os.path.exists("./" + config.DATA_SAVE_DIR):
os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
if not os.path.exists("./" + config.RESULTS_DIR):
os.makedirs("./" + config.RESULTS_DIR)
###Output
_____no_output_____
###Markdown
Part 3. Download DataYahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free.* FinRL uses a class **YahooDownloader** to fetch data from the Yahoo Finance API* Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day). -----class YahooDownloader: Provides methods for retrieving daily stock data from the Yahoo Finance API Attributes ---------- start_date : str start date of the data (modified from config.py) end_date : str end date of the data (modified from config.py) ticker_list : list a list of stock tickers (modified from config.py) Methods ------- fetch_data() Fetches data from the Yahoo API
###Code
# from config.py start_date is a string
config.START_DATE
# from config.py end_date is a string
config.END_DATE
print(config.DOW_30_TICKER)
df = YahooDownloader(start_date = '2009-01-01',
end_date = '2021-01-01',
ticker_list = config.DOW_30_TICKER).fetch_data()
df.shape
df.sort_values(['date','tic'],ignore_index=True).head()
###Output
_____no_output_____
###Markdown
Part 4: Preprocess DataData preprocessing is a crucial step for training a high-quality machine learning model. We need to check for missing data and do feature engineering in order to convert the data into a model-ready state.* Add technical indicators. In practical trading, various pieces of information need to be taken into account, for example historical stock prices, currently held shares, technical indicators, etc. In this article, we demonstrate two trend-following technical indicators: MACD and RSI.* Add turbulence index. Risk-aversion reflects whether an investor will choose to preserve capital. It also influences one's trading strategy when facing different market volatility levels. To control the risk in a worst-case scenario, such as the financial crisis of 2007–2008, FinRL employs the financial turbulence index that measures extreme asset price fluctuations.
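The turbulence index mentioned above is, in Kritzman and Li's formulation, the Mahalanobis distance of a day's cross-sectional returns from their historical distribution; a minimal sketch on synthetic returns (not the library's exact implementation):
```python
import numpy as np

def turbulence(today_returns, hist_returns):
    """Mahalanobis distance of today's return vector from the historical mean."""
    mu = hist_returns.mean(axis=0)
    cov_inv = np.linalg.pinv(np.cov(hist_returns, rowvar=False))
    d = today_returns - mu
    return float(d @ cov_inv @ d)

rng = np.random.default_rng(0)
hist = rng.normal(0.0, 0.01, size=(250, 30))  # 250 days x 30 tickers of daily returns
print(turbulence(hist[-1], hist[:-1]))
```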
###Code
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list = config.INDICATORS,
use_turbulence=True,
user_defined_feature = False)
processed = fe.preprocess_data(df)
processed.sort_values(['date','tic'],ignore_index=True).head(10)
###Output
_____no_output_____
|
examples/vision/ipynb/deeplabv3_plus.ipynb | ###Markdown
Multiclass semantic segmentation using DeepLabV3+**Author:** [Soumik Rakshit](http://github.com/soumik12345)**Date created:** 2021/08/31**Last modified:** 2021/09/01**Description:** Implement the DeepLabV3+ architecture for multi-class semantic segmentation. IntroductionSemantic segmentation, with the goal of assigning semantic labels to every pixel in an image, is an essential computer vision task. In this example, we implement the **DeepLabV3+** model for multi-class semantic segmentation, a fully-convolutional architecture that performs well on semantic segmentation benchmarks. References:- [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf)- [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587)- [DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs](https://arxiv.org/abs/1606.00915) Downloading the dataWe will use the [Crowd Instance-level Human Parsing Dataset](https://arxiv.org/abs/1811.12596) for training our model. The Crowd Instance-level Human Parsing (CIHP) dataset has 38,280 diverse human images. Each image in CIHP is labeled with pixel-wise annotations for 20 categories, as well as instance-level identification. This dataset can be used for the "human part segmentation" task.
###Code
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
!gdown https://drive.google.com/uc?id=1B9A9UCJYMwTL4oBEo4RZfbMZMaZhKJaz
!unzip -q instance-level-human-parsing.zip
###Output
_____no_output_____
###Markdown
Creating a TensorFlow DatasetTraining on the entire CIHP dataset with 38,280 images takes a lot of time, hence we will be usinga smaller subset of 200 images for training our model in this example.
###Code
IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = "./instance-level_human_parsing/instance-level_human_parsing/Training"
MAX_IMAGES = 200
train_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[:MAX_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[:MAX_IMAGES]
def read_image(image_path, mask=False):
image = tf.io.read_file(image_path)
if mask:
image = tf.image.decode_png(image, channels=1)
image.set_shape([None, None, 1])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
else:
image = tf.image.decode_png(image, channels=3)
image.set_shape([None, None, 3])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
image = image / 127.5 - 1
return image
def load_data(image_list, mask_list):
image = read_image(image_list)
mask = read_image(mask_list, mask=True)
return image, mask
def data_generator(image_list, mask_list):
dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
dataset = data_generator(train_images, train_masks)
dataset
###Output
_____no_output_____
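###Markdown
 Before building the model, it is worth confirming that the pipeline yields tensors of the expected shape (a quick illustrative check, not part of the original example):
###Code
for images, masks in dataset.take(1):
    # Expect images of shape (BATCH_SIZE, 512, 512, 3) and masks of (BATCH_SIZE, 512, 512, 1)
    print("Images:", images.shape)
    print("Masks:", masks.shape)
###Output
_____no_output_____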
###Markdown
Building the DeepLabV3+ modelDeepLabv3+ extends DeepLabv3 by adding an encoder-decoder structure. The encoder moduleprocesses multiscale contextual information by applying dilated convolution at multiplescales, while the decoder module refines the segmentation results along object boundaries.**Dilated convolution:** With dilated convolution, as we go deeper in the network, we can keep thestride constant but with larger field-of-view without increasing the number of parametersor the amount of computation. Besides, it enables larger output feature maps, which isuseful for semantic segmentation.The reason for using **Dilated Spatial Pyramid Pooling** is that it was shown that as thesampling rate becomes larger, the number of valid filter weights (i.e., weights thatare applied to the valid feature region, instead of padded zeros) becomes smaller.
###Code
def convolution_block(
block_input,
num_filters=256,
kernel_size=3,
dilation_rate=1,
padding="same",
use_bias=False,
):
x = layers.Conv2D(
num_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding="same",
use_bias=use_bias,
kernel_initializer=keras.initializers.HeNormal(),
)(block_input)
x = layers.BatchNormalization()(x)
return tf.nn.relu(x)
def DilatedSpatialPyramidPooling(dspp_input):
dims = dspp_input.shape
x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)
x = convolution_block(x, kernel_size=1, use_bias=True)
out_pool = layers.UpSampling2D(
size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]), interpolation="bilinear",
)(x)
out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1)
out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6)
out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12)
out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18)
x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18])
output = convolution_block(x, kernel_size=1)
return output
###Output
_____no_output_____
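###Markdown
 As a side note, a kernel of size `k` with dilation rate `d` spans an effective extent of `d * (k - 1) + 1` pixels, which is why the parallel branches above see progressively larger context at identical parameter cost (a quick check, not from the original text):
###Code
k = 3
for d in [1, 6, 12, 18]:
    # Effective one-axis extent of a dilated k x k kernel
    print(f"dilation={d}: effective kernel extent = {d * (k - 1) + 1}")
###Output
_____no_output_____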
###Markdown
The encoder features are first bilinearly upsampled by a factor 4, and thenconcatenated with the corresponding low-level features from the network backbone thathave the same spatial resolution. For this example, weuse a ResNet50 pretrained on ImageNet as the backbone model, and we usethe low-level features from the `conv4_block6_2_relu` block of the backbone.
###Code
def DeeplabV3Plus(image_size, num_classes):
model_input = keras.Input(shape=(image_size, image_size, 3))
resnet50 = keras.applications.ResNet50(
weights="imagenet", include_top=False, input_tensor=model_input
)
x = resnet50.get_layer("conv4_block6_2_relu").output
x = DilatedSpatialPyramidPooling(x)
input_a = layers.UpSampling2D(
size=(image_size // 4 // x.shape[1], image_size // 4 // x.shape[2]),
interpolation="bilinear",
)(x)
input_b = resnet50.get_layer("conv2_block3_2_relu").output
input_b = convolution_block(input_b, num_filters=48, kernel_size=1)
x = layers.Concatenate(axis=-1)([input_a, input_b])
x = convolution_block(x)
x = convolution_block(x)
x = layers.UpSampling2D(
size=(image_size // x.shape[1], image_size // x.shape[2]),
interpolation="bilinear",
)(x)
model_output = layers.Conv2D(num_classes, kernel_size=(1, 1), padding="same")(x)
return keras.Model(inputs=model_input, outputs=model_output)
model = DeeplabV3Plus(image_size=IMAGE_SIZE, num_classes=NUM_CLASSES)
model.summary()
###Output
_____no_output_____
###Markdown
TrainingWe train the model using sparse categorical crossentropy as the loss function, andAdam as the optimizer.
###Code
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss=loss,
metrics=["accuracy"],
)
history = model.fit(dataset, epochs=25)
plt.plot(history.history["loss"])
plt.title("Training Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
plt.plot(history.history["accuracy"])
plt.title("Training Accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.show()
###Output
_____no_output_____
###Markdown
Inference using Colormap OverlayThe raw predictions from the model represent a one-hot encoded tensor of shape `(N, 512, 512, 20)`where each one of the 20 channels is a binary mask corresponding to a predicted label.In order to visualize the results, we plot them as RGB segmentation masks where each pixelis represented by a unique color corresponding to the particular label predicted. We can easilyfind the color corresponding to each label from the `human_colormap.mat` file provided as partof the dataset. We would also plot an overlay of the RGB segmentation mask on the input image asthis further helps us to identify the different categories present in the image more intuitively.
###Code
# Loading the Colormap
colormap = loadmat(
"./instance-level_human_parsing/instance-level_human_parsing/human_colormap.mat"
)["colormap"]
colormap = colormap * 100
colormap = colormap.astype(np.uint8)
def infer(model, image_tensor):
predictions = model.predict(np.expand_dims((image_tensor), axis=0))
predictions = np.squeeze(predictions)
predictions = np.argmax(predictions, axis=2)
return predictions
def decode_segmentation_masks(mask, colormap, n_classes):
r = np.zeros_like(mask).astype(np.uint8)
g = np.zeros_like(mask).astype(np.uint8)
b = np.zeros_like(mask).astype(np.uint8)
for l in range(0, n_classes):
idx = mask == l
r[idx] = colormap[l, 0]
g[idx] = colormap[l, 1]
b[idx] = colormap[l, 2]
rgb = np.stack([r, g, b], axis=2)
return rgb
def get_overlay(image, colored_mask):
image = tf.keras.preprocessing.image.array_to_img(image)
image = np.array(image).astype(np.uint8)
overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0)
return overlay
def plot_samples_matplotlib(display_list, figsize=(5, 3)):
_, axes = plt.subplots(nrows=1, ncols=len(display_list), figsize=figsize)
for i in range(len(display_list)):
if display_list[i].shape[-1] == 3:
axes[i].imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
else:
axes[i].imshow(display_list[i])
plt.show()
def plot_predictions(images_list, colormap, model):
for image_file in images_list:
image_tensor = read_image(image_file)
prediction_mask = infer(image_tensor=image_tensor, model=model)
prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20)
overlay = get_overlay(image_tensor, prediction_colormap)
plot_samples_matplotlib(
[image_tensor, overlay, prediction_colormap], figsize=(18, 14)
)
plot_predictions(train_images[:4], colormap, model=model)
###Output
_____no_output_____
###Markdown
Multiclass semantic segmentation using DeepLabV3+**Author:** [Soumik Rakshit](http://github.com/soumik12345)**Date created:** 2021/08/31**Last modified:** 2021/09/1**Description:** Implement DeepLabV3+ architecture for Multi-class Semantic Segmentation. IntroductionSemantic segmentation, with the goal to assign semantic labels to every pixel in an image,is an essential computer vision task. In this example, we implementthe **DeepLabV3+** model for multi-class semantic segmentation, a fully-convolutionalarchitecture that performs well on semantic segmentation benchmarks. References:- [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf)- [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587)- [DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs](https://arxiv.org/abs/1606.00915) Downloading the dataWe will use the [Crowd Instance-level Human Parsing Dataset](https://arxiv.org/abs/1811.12596)for training our model. The Crowd Instance-level Human Parsing (CIHP) dataset has 38,280 diverse human images.Each image in CIHP is labeled with pixel-wise annotations for 20 categories, as well as instance-level identification.This dataset can be used for the "human part segmentation" task.
###Code
import os
import cv2
import numpy as np
from glob import glob
from scipy.io import loadmat
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
!gdown https://drive.google.com/uc?id=1B9A9UCJYMwTL4oBEo4RZfbMZMaZhKJaz
!unzip -q instance-level-human-parsing.zip
###Output
_____no_output_____
###Markdown
Creating a TensorFlow DatasetTraining on the entire CIHP dataset with 38,280 images takes a lot of time, hence we will be using a smaller subset of 1,000 images for training and 50 images for validation in this example.
###Code
IMAGE_SIZE = 512
BATCH_SIZE = 4
NUM_CLASSES = 20
DATA_DIR = "./instance-level_human_parsing/instance-level_human_parsing/Training"
NUM_TRAIN_IMAGES = 1000
NUM_VAL_IMAGES = 50
train_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[:NUM_TRAIN_IMAGES]
train_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[:NUM_TRAIN_IMAGES]
val_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[
NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
val_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[
NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES
]
def read_image(image_path, mask=False):
image = tf.io.read_file(image_path)
if mask:
image = tf.image.decode_png(image, channels=1)
image.set_shape([None, None, 1])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
else:
image = tf.image.decode_png(image, channels=3)
image.set_shape([None, None, 3])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
image = image / 127.5 - 1
return image
def load_data(image_list, mask_list):
image = read_image(image_list)
mask = read_image(mask_list, mask=True)
return image, mask
def data_generator(image_list, mask_list):
dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_dataset = data_generator(train_images, train_masks)
val_dataset = data_generator(val_images, val_masks)
print("Train Dataset:", train_dataset)
print("Val Dataset:", val_dataset)
###Output
_____no_output_____
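###Markdown
 Because the training and validation splits are consecutive slices of the same sorted file list, they are disjoint by construction; the assertion below makes that explicit (a defensive check, not in the original example):
###Code
# Slices [0:1000] and [1000:1050] of the same sorted list cannot overlap
assert set(train_images).isdisjoint(val_images)
###Output
_____no_output_____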
###Markdown
Building the DeepLabV3+ modelDeepLabv3+ extends DeepLabv3 by adding an encoder-decoder structure. The encoder moduleprocesses multiscale contextual information by applying dilated convolution at multiplescales, while the decoder module refines the segmentation results along object boundaries.**Dilated convolution:** With dilated convolution, as we go deeper in the network, we can keep thestride constant but with larger field-of-view without increasing the number of parametersor the amount of computation. Besides, it enables larger output feature maps, which isuseful for semantic segmentation.The reason for using **Dilated Spatial Pyramid Pooling** is that it was shown that as thesampling rate becomes larger, the number of valid filter weights (i.e., weights thatare applied to the valid feature region, instead of padded zeros) becomes smaller.
###Code
def convolution_block(
block_input,
num_filters=256,
kernel_size=3,
dilation_rate=1,
padding="same",
use_bias=False,
):
x = layers.Conv2D(
num_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding="same",
use_bias=use_bias,
kernel_initializer=keras.initializers.HeNormal(),
)(block_input)
x = layers.BatchNormalization()(x)
return tf.nn.relu(x)
def DilatedSpatialPyramidPooling(dspp_input):
dims = dspp_input.shape
x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)
x = convolution_block(x, kernel_size=1, use_bias=True)
out_pool = layers.UpSampling2D(
size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]), interpolation="bilinear",
)(x)
out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1)
out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6)
out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12)
out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18)
x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18])
output = convolution_block(x, kernel_size=1)
return output
###Output
_____no_output_____
###Markdown
The encoder features are first bilinearly upsampled by a factor 4, and thenconcatenated with the corresponding low-level features from the network backbone thathave the same spatial resolution. For this example, weuse a ResNet50 pretrained on ImageNet as the backbone model, and we usethe low-level features from the `conv4_block6_2_relu` block of the backbone.
###Code
def DeeplabV3Plus(image_size, num_classes):
model_input = keras.Input(shape=(image_size, image_size, 3))
resnet50 = keras.applications.ResNet50(
weights="imagenet", include_top=False, input_tensor=model_input
)
x = resnet50.get_layer("conv4_block6_2_relu").output
x = DilatedSpatialPyramidPooling(x)
input_a = layers.UpSampling2D(
size=(image_size // 4 // x.shape[1], image_size // 4 // x.shape[2]),
interpolation="bilinear",
)(x)
input_b = resnet50.get_layer("conv2_block3_2_relu").output
input_b = convolution_block(input_b, num_filters=48, kernel_size=1)
x = layers.Concatenate(axis=-1)([input_a, input_b])
x = convolution_block(x)
x = convolution_block(x)
x = layers.UpSampling2D(
size=(image_size // x.shape[1], image_size // x.shape[2]),
interpolation="bilinear",
)(x)
model_output = layers.Conv2D(num_classes, kernel_size=(1, 1), padding="same")(x)
return keras.Model(inputs=model_input, outputs=model_output)
model = DeeplabV3Plus(image_size=IMAGE_SIZE, num_classes=NUM_CLASSES)
model.summary()
###Output
_____no_output_____
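###Markdown
 For a 512x512 input, `conv4_block6_2_relu` sits at stride 16 (32x32 feature maps) while `conv2_block3_2_relu` sits at stride 4 (128x128), which is why the decoder applies the factor-4 upsampling before concatenation. This can be confirmed directly (illustrative; `weights=None` avoids downloading ImageNet weights just for a shape check):
###Code
backbone = keras.applications.ResNet50(
    weights=None, include_top=False, input_shape=(512, 512, 3)
)
# Expect spatial shapes (None, 32, 32, ...) and (None, 128, 128, ...)
print(backbone.get_layer("conv4_block6_2_relu").output.shape)
print(backbone.get_layer("conv2_block3_2_relu").output.shape)
###Output
_____no_output_____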
###Markdown
TrainingWe train the model using sparse categorical crossentropy as the loss function, andAdam as the optimizer.
###Code
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss=loss,
metrics=["accuracy"],
)
history = model.fit(train_dataset, validation_data=val_dataset, epochs=25)
plt.plot(history.history["loss"])
plt.title("Training Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
plt.plot(history.history["accuracy"])
plt.title("Training Accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.show()
plt.plot(history.history["val_loss"])
plt.title("Validation Loss")
plt.ylabel("val_loss")
plt.xlabel("epoch")
plt.show()
plt.plot(history.history["val_accuracy"])
plt.title("Validation Accuracy")
plt.ylabel("val_accuracy")
plt.xlabel("epoch")
plt.show()
###Output
_____no_output_____
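###Markdown
 Pixel accuracy can be misleading for segmentation because a few large classes dominate it; mean IoU is usually more informative. A sketch of evaluating it on the validation set (illustrative, not part of the original training loop):
###Code
miou = tf.keras.metrics.MeanIoU(num_classes=NUM_CLASSES)
for images, masks in val_dataset:
    # Collapse the per-class logits to a label map before updating the metric
    preds = np.argmax(model.predict(images), axis=-1)
    miou.update_state(masks[..., 0], preds)
print("Validation mean IoU:", miou.result().numpy())
###Output
_____no_output_____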
###Markdown
Inference using Colormap OverlayThe raw predictions from the model represent a one-hot encoded tensor of shape `(N, 512, 512, 20)`where each one of the 20 channels is a binary mask corresponding to a predicted label.In order to visualize the results, we plot them as RGB segmentation masks where each pixelis represented by a unique color corresponding to the particular label predicted. We can easilyfind the color corresponding to each label from the `human_colormap.mat` file provided as partof the dataset. We would also plot an overlay of the RGB segmentation mask on the input image asthis further helps us to identify the different categories present in the image more intuitively.
###Code
# Loading the Colormap
colormap = loadmat(
"./instance-level_human_parsing/instance-level_human_parsing/human_colormap.mat"
)["colormap"]
colormap = colormap * 100
colormap = colormap.astype(np.uint8)
def infer(model, image_tensor):
predictions = model.predict(np.expand_dims((image_tensor), axis=0))
predictions = np.squeeze(predictions)
predictions = np.argmax(predictions, axis=2)
return predictions
def decode_segmentation_masks(mask, colormap, n_classes):
r = np.zeros_like(mask).astype(np.uint8)
g = np.zeros_like(mask).astype(np.uint8)
b = np.zeros_like(mask).astype(np.uint8)
for l in range(0, n_classes):
idx = mask == l
r[idx] = colormap[l, 0]
g[idx] = colormap[l, 1]
b[idx] = colormap[l, 2]
rgb = np.stack([r, g, b], axis=2)
return rgb
def get_overlay(image, colored_mask):
image = tf.keras.preprocessing.image.array_to_img(image)
image = np.array(image).astype(np.uint8)
overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0)
return overlay
def plot_samples_matplotlib(display_list, figsize=(5, 3)):
_, axes = plt.subplots(nrows=1, ncols=len(display_list), figsize=figsize)
for i in range(len(display_list)):
if display_list[i].shape[-1] == 3:
axes[i].imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
else:
axes[i].imshow(display_list[i])
plt.show()
def plot_predictions(images_list, colormap, model):
for image_file in images_list:
image_tensor = read_image(image_file)
prediction_mask = infer(image_tensor=image_tensor, model=model)
prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20)
overlay = get_overlay(image_tensor, prediction_colormap)
plot_samples_matplotlib(
[image_tensor, overlay, prediction_colormap], figsize=(18, 14)
)
###Output
_____no_output_____
###Markdown
Inference on Train Images
###Code
plot_predictions(train_images[:4], colormap, model=model)
###Output
_____no_output_____
###Markdown
Inference on Validation ImagesYou can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/deeplabv3p-resnet50) and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/Human-Part-Segmentation).
###Code
plot_predictions(val_images[:4], colormap, model=model)
###Output
_____no_output_____ |
ImageCollageProject/ImageCollageMaker.ipynb | ###Markdown
Goal: Allow the user to create a directory of small images (all of the same size) and use them as a data set to mask an image. This example code tries to find the closest match between each image region and the sample images based on RGB similarity.
###Code
from PIL import Image
from os import listdir
from IPython.core.display import display, HTML
# NOTE: update to work with different image source
#emojiCharSet_24x24_35.png
# Read average RGB values for this image
def imageRGBs( image ):
#image = 'files/imageset/csharp.png'
im = Image.open(image)
width, height = im.size
pix = im.load()
r = 0
g = 0
b = 0
count = width*height
for w in range(width):
for h in range(height):
r += pix[w,h][0]
g += pix[w,h][1]
b += pix[w,h][2]
print(image,(int(r/count), int(g/count),int(b/count)))
return (image,(int(r/count), int(g/count),int(b/count)))
def makeCharImageSet( directory, height ):
'''inputs: directory of images, height in pixels of output imageset
outputs: filename of output imageset'''
width = height*len(listdir(directory))
# Create shell of resulting image
resultImg = Image.new('RGB', (width,height))
count = 0
for image in listdir(directory):
path = directory + '/' + image
im = Image.open( path )
resultImg.paste(im, (count*height,0))
count += 1
resultImg.save('charImageSet.jpg','JPEG')
display(HTML('<img src="charImageSet.jpg">'))
return 'charImageSet.jpg'
def get_RGB_dictionary(imageset, size):
'''inputs: image containing uniform sized icons, size in pixels of each icon
outputs: a list of icon index and average RGB values'''
results = [] # store image index and average RGB tuples (1, (0,0,0))
im = Image.open(imageset)
width, height = im.size
pix = im.load()
index = 0
for box in range(0,width,size): # 0, 48, 96... moving x starting place
r = 0
g = 0
b = 0
for w in range(box, box+size):
for h in range(height):
r += pix[w,h][0]
g += pix[w,h][1]
b += pix[w,h][2]
avgR = r/(size**2)
avgG = g/(size**2)
avgB = b/(size**2)
results.append( (index, (avgR,avgG,avgB)) )
index += 1
return results
# find an image to convert to an alternate image set. open this image and scale to a certain % size
def scaleInputImage( image_path, width_multiple, height_multiple, scale):
im = Image.open(image_path)
print(im.width, im.height)
print(im.width-im.width%width_multiple,im.height-im.height%height_multiple)
imresize = im.resize(((im.width-im.width%width_multiple)*scale, (im.height-im.height%height_multiple)*scale), Image.ANTIALIAS)
newname = image_path.split('.')[0] + '_%dx%d.png' %(width_multiple,height_multiple)
imresize.save(newname)
return newname
def findClosestRGB( testTuple, replaceList ):
# testTuple in form (avgR, avgG, argB)
# replaceList in form [ (index, (avgR, avgG, avgB)), ... ]
minDiff = 3*255
minIndex = 0
for icon in replaceList:
idx = icon[0]
iR = icon[1][0]
iG = icon[1][1]
iB = icon[1][2]
diff = abs(testTuple[0]-iR) + abs(testTuple[1]-iG) + abs(testTuple[2]-iB)
if diff < minDiff:
minIndex = idx
minDiff = diff
return minIndex
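# NOTE: the match above uses the L1 (Manhattan) distance in RGB space.
# A Euclidean variant is a common alternative (illustrative sketch, unused below):
# diff = ((testTuple[0]-iR)**2 + (testTuple[1]-iG)**2 + (testTuple[2]-iB)**2) ** 0.5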
# input arguments:
imageDirectory = 'imageset'
size = 48
testImage = 'test.jpg'
# STEP 1: Create a single image set out of a directory of sample images.
replaceImage = makeCharImageSet(imageDirectory, size)
print('(1) replacement imageset created:', replaceImage)
# STEP 2: Scan through the replaceImage file and create a list of
# replaceImage average RGB values
replaceRGBlist = get_RGB_dictionary(replaceImage, size)
print('(2) list of replacement RGB values created: %d items' %(len(replaceRGBlist)))
#print(replaceRGBlist,'\n')
# STEP 3: Scale test image so that it has %size dimensions
# TODO: update algorithm below so that this isn't required
testImage = scaleInputImage(testImage, size, size, 4)
print('(3) test image scaled. ready to be processed:', testImage)
# STEP 4:
# scan through the scaled test image in chunks of 48x48 (or custom) and find the average RGB for each region
# next, compare that average RGB against the test set to find the closest match.
# finally, replace this image with the test set image.
im = Image.open(testImage)
pix = im.load()
replaceImage = Image.open(replaceImage)
#charpix = charimg.load()
for boxX in range(0,im.width,size):
for boxY in range(0,im.height,size):
# loop over each box
r = 0
g = 0
b = 0
for x in range(boxX,boxX+size):
for y in range(boxY, boxY+size):
r += pix[x,y][0]
g += pix[x,y][1]
b += pix[x,y][2]
# average the r,g,b pixel values
rAvg = r/(size*size)
gAvg = g/(size*size)
bAvg = b/(size*size)
avgRGB = (rAvg, gAvg, bAvg)
# next, compare this tuple of averaged RGB values
# against the averaged RGB tuples in the replacement image set
# the method will return the location in the replacement image
# set with the minimum difference
index = findClosestRGB( avgRGB, replaceRGBlist )
#print(avgRGB,'index',index)
#top,bot = topBot(pix, i, j, 48, 48)
#index = picChar(_charDict, (top,bot))
chop = replaceImage.crop((index*size,0,index*size+size,size))
im.paste(chop, (boxX,boxY,boxX+size,boxY+size))
im.save('result.png')
print('Done.')
###Output
_____no_output_____ |
Object tracking and Localization/matrices_nd_Transformation_state/2_matrices_in_python.ipynb | ###Markdown
Coding MatricesHere are a few exercises to get you started with coding matrices. The exercises start off with vectors and then get more challenging. Vectors
###Code
### TODO: Assign the vector <5, 10, 2, 6, 1> to the variable v
v = [5, 10, 2, 6, 1]
###Output
_____no_output_____
###Markdown
The v variable contains a Python list. This list could also be thought of as a 1x5 matrix with 1 row and 5 columns. How would you represent this list as a matrix?
###Code
### TODO: Assign the vector <5, 10, 2, 6, 1> to the variable mv
### The difference between a vector and a matrix in Python is that
### a matrix is a list of lists.
### Hint: See the last quiz on the previous page
mv = [[5, 10, 2, 6, 1]]
###Output
_____no_output_____
###Markdown
How would you represent this vector in its vertical form with 5 rows and 1 column? When defining matrices in Python, each row is a list. So in this case, you have 5 rows and thus will need 5 lists.As an example, this is what the vector $\langle 5, 7 \rangle$ would look like as a 1x2 matrix in Python: ```pythonmatrix1by2 = [ [5, 7]]```And here is what the same vector would look like as a 2x1 matrix:```pythonmatrix2by1 = [ [5], [7]]```
###Code
### TODO: Assign the vector <5, 10, 2, 6, 1> to the variable vT
### vT is a 5x1 matrix
vT = [
[5],
[10],
[2],
[6],
[1]
]
###Output
_____no_output_____
###Markdown
Assigning Matrices to Variables
###Code
### TODO: Assign the following matrix to the variable m
### 8 7 1 2 3
### 1 5 2 9 0
### 8 2 2 4 1
m = [[8, 7, 1, 2, 3],
[1, 5, 2, 9, 0],
[8, 2, 2, 4, 1]]
###Output
_____no_output_____
###Markdown
Accessing Matrix Values
###Code
### TODO: In matrix m, change the value
### in the second row last column from 0 to 5
### Hint: You do not need to rewrite the entire matrix
m[1][4] = 5
###Output
_____no_output_____
###Markdown
Looping through Matrices to do MathCoding mathematical operations with matrices can be tricky. Because matrices are lists of lists, you will need to use a for loop inside another for loop. The outside for loop iterates over the rows and the inside for loop iterates over the columns.Here is some pseudo code```pythonfor i in number of rows: for j in number of columns: mymatrix[i][j]```To figure out how many times to loop over the matrix, you need to know the number of rows and number of columns. If you have a variable with a matrix in it, how could you figure out the number of rows? How could you figure out the number of columns? The [len](https://docs.python.org/2/library/functions.html#len) function in Python might be helpful. Scalar Multiplication
###Code
### TODO: Use for loops to multiply each matrix element by 5
### Store the answer in the r variable. This is called scalar
### multiplication
###
### HINT: First write a for loop that iterates through the rows
### one row at a time
###
### Then write another for loop within the for loop that
### iterates through the columns
###
### If you used the variable i to represent rows and j
### to represent columns, then m[i][j] would give you
### access to each element in the matrix
###
### Because r is an empty list, you cannot directly assign
### a value like r[i][j] = m[i][j]. You might have to
### work on one row at a time and then use r.append(row).
r = []
for i in range(len(m)):
row = m[i]
r_ = [] # empty row for now
for j in range(len(row)):
m_ij = m[i][j]
r_ij = 5 * m_ij
r_.append(r_ij)
r.append(r_)
r
###Output
_____no_output_____
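###Markdown
 The same scalar multiplication can be written more compactly with a nested list comprehension (an equivalent formulation, shown here only for comparison):
###Code
r_compact = [[5 * m_ij for m_ij in row] for row in m]
assert r_compact == r
###Output
_____no_output_____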
###Markdown
Printing Out a Matrix
###Code
### TODO: Write a function called matrix_print()
### that prints out a matrix in
### a way that is easy to read.
### Each element in a row should be separated by a tab
### And each row should have its own line
### You can test out your results with the m matrix
### HINT: You can use a for loop within a for loop
### In Python, the print() function will be useful
### print(5, '\t', end = '') will print out the integer 5,
### then add a tab after the 5. The end = '' makes sure that
### the print function does not print out a new line if you do
### not want a new line.
### Your output should look like this
### 8 7 1 2 3
### 1 5 2 9 5
### 8 2 2 4 1
def matrix_print(matrix):
for i in range(len(matrix)):
for j in range(len(matrix[0])):
m_ij = matrix[i][j]
print(m_ij, '\t', end="")
print('\n')
return
m = [
[8, 7, 1, 2, 3],
[1, 5, 2, 9, 5],
[8, 2, 2, 4, 1]
]
matrix_print(m)
###Output
8 7 1 2 3
1 5 2 9 5
8 2 2 4 1
###Markdown
Test Your Results
###Code
### You can run these tests to see if you have the expected
### results. If everything is correct, this cell has no output
assert v == [5, 10, 2, 6, 1]
assert mv == [
[5, 10, 2, 6, 1]
]
assert vT == [
[5],
[10],
[2],
[6],
[1]]
assert m == [
[8, 7, 1, 2, 3],
[1, 5, 2, 9, 5],
[8, 2, 2, 4, 1]
]
assert r == [
[40, 35, 5, 10, 15],
[5, 25, 10, 45, 25],
[40, 10, 10, 20, 5]
]
###Output
_____no_output_____
###Markdown
Print Out Your Results
###Code
### Run this cell to print out your answers
print(v)
print(mv)
print(vT)
print(m)
print(r)
###Output
[5, 10, 2, 6, 1]
[[5, 10, 2, 6, 1]]
[[5], [10], [2], [6], [1]]
[[8, 7, 1, 2, 3], [1, 5, 2, 9, 5], [8, 2, 2, 4, 1]]
[[40, 35, 5, 10, 15], [5, 25, 10, 45, 25], [40, 10, 10, 20, 5]]
|
udacity_ml/software_engineering/holiday_gifts/optimizing_code_holiday_gifts.ipynb | ###Markdown
Optimizing Code: Holiday GiftsIn the last example, you learned that using vectorized operations and more efficient data structures can optimize your code. Let's use these tips for one more example.Say your online gift store has one million users that each listed a gift on a wish list. You have the prices for each of these gifts stored in `gift_costs.txt`. For the holidays, you're going to give each customer their wish list gift for free if it is under 25 dollars. Now, you want to calculate the total cost of all gifts under 25 dollars to see how much you'd spend on free gifts. Here's one way you could've done it.
###Code
import time
import numpy as np
with open('gift_costs.txt') as f:
gift_costs = f.read().split('\n')
gift_costs = np.array(gift_costs).astype(int) # convert string to int
start = time.time()
total_price = 0
for cost in gift_costs:
if cost < 25:
total_price += cost * 1.08 # add cost after tax
print(total_price)
print('Duration: {} seconds'.format(time.time() - start))
###Output
32765421.24
Duration: 6.560739994049072 seconds
###Markdown
Here you iterate through each cost in the list, and check if it's less than 25. If so, you add the cost to the total price after tax. This works, but there is a much faster way to do this. Can you refactor this to run under half a second? Refactor Code**Hint:** Using numpy makes it very easy to select all the elements in an array that meet a certain condition, and then perform operations on them together all at once. You can then find the sum of what those values end up being.
###Code
start = time.time()
total_price = np.sum(gift_costs[gift_costs < 25] * 1.08) # compute the total price
print(total_price)
print('Duration: {} seconds'.format(time.time() - start))
###Output
32765421.24
Duration: 0.09631609916687012 seconds
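###Markdown
 The speedup comes from the boolean mask `gift_costs < 25`, which selects every qualifying element in one vectorized pass instead of a Python-level loop. An equivalent formulation with `np.where` (illustrative; it multiplies the whole array before summing, so it is slightly less efficient):
###Code
start = time.time()
total_price = np.sum(np.where(gift_costs < 25, gift_costs * 1.08, 0))
print(total_price)
print('Duration: {} seconds'.format(time.time() - start))
###Output
_____no_output_____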
|
Recommendations/recommendation_kmeans/recommendation_project_part2.ipynb | ###Markdown
About the Dataset
###Code
#nextcell
ratings = pd.read_csv('/Users/ankitkothari/Documents/gdrivre/UMD/MSML-602-DS/final_project/ratings_small.csv')
movies = pd.read_csv('/Users/ankitkothari/Documents/gdrivre/UMD/MSML-602-DS/final_project/movies_metadata_features.csv')
###Output
_____no_output_____
###Markdown
Data Cleaning Dropping Columns
###Code
movies.drop(columns=['Unnamed: 0'],inplace=True)
ratings = pd.merge(movies,ratings).drop(['genres','timestamp','imdb_id','overview','popularity','production_companies','production_countries','release_date','revenue','runtime','vote_average','year','vote_count','original_language'],axis=1)
usri = int(input()) #587 #15 #468
select_user = ratings.loc[ratings['userId'] == usri]
###Output
15
###Markdown
Finding Similarity Matrix Creating a Pivot Table of Title against userId for ratings
###Code
userRatings = ratings.pivot_table(index=['title'],columns=['userId'],values='rating')
userRatings = userRatings.dropna(thresh=10, axis=1).fillna(0,axis=1)
corrMatrix = userRatings.corr(method='pearson')
#corrMatrix = userRatings.corr(method='spearman')
#corrMatrix = userRatings.corr(method='kendall')
###Output
_____no_output_____
###Markdown
Creating Similarity Matrix using Pearson Correlation method
###Code
def get_similar(usrid):
similar_ratings = corrMatrix[usrid]
similar_ratings = similar_ratings.sort_values(ascending=False)
return similar_ratings
###Output
_____no_output_____
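###Markdown
 `corrMatrix[usrid]` holds the Pearson correlation between the given user's rating vector and every other user's, so sorting it in descending order ranks users by taste similarity. For example (illustrative; assumes user 15 survived the 10-rating threshold applied above):
###Code
# Top five most similar users to user 15, excluding the user themself
print(get_similar(15).drop(15).head(5))
###Output
_____no_output_____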
###Markdown
Recommendation
###Code
moidofotus = [0,0,0,0]
s_m = pd.DataFrame()
s_m = s_m.append(get_similar(usri), ignore_index=True)
for c in range(0,4):
moidofotus[c]=s_m.columns[c]
if moidofotus[0] == usri:
moidofotus.pop(0)
print(moidofotus)
movie_match=[]
for i in moidofotus:
select_user = ratings.loc[ratings['userId'] == i]
#print(select_user)
print("For user", i)
final_use = select_user.loc[select_user['rating'] >= 4.0].sort_values(by=['rating'],ascending=False).iloc[0:10,:]
print(final_use['title'])
movie_match.append(final_use['title'].to_list())
select_user['title']
###Output
_____no_output_____
###Markdown
Performance Evaluation
###Code
movies_suggested_and_he_watched=0
total_suggest_movies = 0
# Re-select the target user's ratings: select_user was overwritten with the
# last similar user's rows inside the recommendation loop above.
select_user = ratings.loc[ratings['userId'] == usri]
for movies in movie_match:
total_suggest_movies=total_suggest_movies+len(movies)
for movie in movies:
if movie in select_user['title'].to_list():
movies_suggested_and_he_watched=movies_suggested_and_he_watched+1
print(movies_suggested_and_he_watched)
print(total_suggest_movies)
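# Overlap ratio (a rough precision-style score): the fraction of recommended
# titles the target user has already rated (illustrative, left commented out):
# print(movies_suggested_and_he_watched / total_suggest_movies)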
###Output
27
30
|
notebooks/00 - visualisation.ipynb | ###Markdown
Load data
###Code
adni.load(show_output=False)
###Output
_____no_output_____
###Markdown
Display MetaData
###Code
meta_df = adni.meta_to_df()
sprint.pd_cols(meta_df)
###Output
_____no_output_____
###Markdown
Display ImageFiles
###Code
files_df = adni.files_to_df()
sprint.pd_cols(files_df)
adni_df = adni.to_df()
sprint.pd_cols(adni_df)
###Output
_____no_output_____
###Markdown
Analysis Overview
###Code
fig, axes = splot.meta_settings(rows=3)
splot.histplot(
adni_df,
x='subject.researchGroup',
hue='subject.subjectSex',
ax=axes[0,0],
plot_kws={'stat':'frequency'},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'ResearchGroup distribution','xlabel':'Disorder'}
)
splot.histplot(
adni_df,
x='subject.subjectIdentifier',
ax=axes[0,1],
plot_kws={'stat':'frequency'},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'SubjectIdentifier distribution','xlabel':'subjectIdentifier','rotation':90}
)
splot.histplot(
adni_df,
x='subject.subjectSex',
ax=axes[1,0],
plot_kws={'stat':'frequency'},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'SubjectSex distribution','xlabel':'subjectSex'}
)
splot.histplot(
adni_df,
x='subject.study.subjectAge',
hue='subject.subjectSex',
discrete=False,
ax=axes[1,1],
plot_kws={'element':'poly','fill':False},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'SubjectAge distribution'}
)
splot.histplot(
adni_df,
x='subject.study.series.dateAcquired',
hue='subject.researchGroup',
discrete=False,
ax=axes[2,0],
plot_kws={},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'SubjectAge distribution'}
)
splot.histplot(
adni_df,
x='subject.study.weightKg',
hue='subject.subjectSex',
discrete=False,
ax=axes[2,1],
plot_kws={'element':'poly','fill':False},
legend_kws={'title':'subjectSex'},
setting_kws={'title':'weightKg distribution'}
)
plt.show()
###Output
_____no_output_____
###Markdown
Data sizes
###Code
fig, axes = splot.meta_settings(rows=2,figsize=(15,10))
splot.histplot(
adni_df,
discrete=False,
x='subject.study.imagingProtocol.protocolTerm.protocol.Number_of_Slices',
hue='subject.researchGroup',
multiple='stack',
ax=axes[0,0],
plot_kws={'stat':'frequency'},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'Number of Slices','xlabel':'Slices','ylabel':'Frequency'}
)
splot.histplot(
adni_df,
discrete=False,
x='subject.study.imagingProtocol.protocolTerm.protocol.Number_of_Columns',
hue='subject.researchGroup',
multiple='stack',
ax=axes[0,1],
plot_kws={'stat':'frequency'},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'Number of Columns','xlabel':'Slices','ylabel':'Frequency'}
)
splot.histplot(
adni_df,
discrete=False,
x='subject.study.imagingProtocol.protocolTerm.protocol.Number_of_Rows',
hue='subject.researchGroup',
multiple='stack',
ax=axes[1,0],
plot_kws={'stat':'frequency'},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'Number of Rows','xlabel':'Slices','ylabel':'Frequency'}
)
plt.show()
###Output
_____no_output_____
###Markdown
Scoring
###Code
fig, axes = splot.meta_settings(rows=3)
splot.histplot(
adni_df,
discrete=True,
x='subject.visit.assessment.component.assessmentScore.FAQTOTAL',
hue='subject.researchGroup',
multiple='stack',
ax=axes[0,0],
plot_kws={'stat':'frequency'},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'Functional Activities Questionnaires (FAQTOTAL)','xlabel':'Score','ylabel':'Frequency'}
)
splot.histplot(
adni_df,
discrete=True,
x='subject.visit.assessment.component.assessmentScore.NPISCORE',
hue='subject.researchGroup',
multiple='stack',
ax=axes[0,1],
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'assessmentScore_NPISCORE','xlabel':'Score','ylabel':'Frequency'}
)
splot.histplot(
adni_df,
discrete=True,
x='subject.visit.assessment.component.assessmentScore.CDGLOBAL',
hue='subject.researchGroup',
multiple='stack',
ax=axes[1,0],
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'Clinical Dementia Rating Scale (CDGLOBAL)','xlabel':'Score','ylabel':'Frequency'}
)
splot.histplot(
adni_df,
discrete=True,
x='subject.visit.assessment.component.assessmentScore.GDTOTAL',
hue='subject.researchGroup',
multiple='stack',
ax=axes[1,1],
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'assessmentScore.GDTOTAL','xlabel':'Score','ylabel':'Frequency'}
)
splot.histplot(
adni_df,
discrete=True,
x='subject.visit.assessment.component.assessmentScore.MMSCORE',
hue='subject.researchGroup',
multiple='stack',
ax=axes[2,0],
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'Mini-Mental State Examination (MMSCORE)','xlabel':'Score','ylabel':'Frequency'}
)
splot.histplot(
adni_df,
x='subject.visit.assessment.component.assessmentScore.MMSCORE',
hue='subject.researchGroup',
discrete=False,
ax=axes[2,1],
plot_kws={'element':'poly','fill':False},
legend_kws={'title':'ResearchGroup'},
setting_kws={'title':'MMSE Score per Condition'}
)
plt.show()
###Output
_____no_output_____
###Markdown
Visualise brain slices Create Image generator
###Code
SKIP_LAYERS = 10
LIMIT_LAYERS = 70
image_AD_generator = adni.load_images(
files=adni.load_files(adni.path.category+'AD/', adni.filename_category, use_processed=True)
)
image_CN_generator = adni.load_images(
files=adni.load_files(adni.path.category+'CN/', adni.filename_category, use_processed=True)
)
image_MCI_generator = adni.load_images(
files=adni.load_files(adni.path.category+'MCI/', adni.filename_category, use_processed=True)
)
### Testing functions
from nilearn.plotting import view_img, plot_glass_brain, plot_anat, plot_epi
test_image = next(image_CN_generator)
test_image.shape
while True:
test_image = next(image_AD_generator)
plot_anat(test_image, draw_cross=False, display_mode='z',cut_coords=20,annotate=False)
plt.show()
break
images_AD_array = adni.to_array(list(image_AD_generator))
images_CN_array = adni.to_array(list(image_CN_generator))
images_MCI_array = adni.to_array(list(image_MCI_generator))
images_AD = next(images_AD_array)
images_CN = next(images_CN_array)
images_MCI = next(images_MCI_array)
###Output
_____no_output_____
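###Markdown
 Each loaded volume is a 3D array, and slicing along axes 0, 1 and 2 yields the three viewing planes shown in the sections below (quick illustrative check):
###Code
# The three plane sections below index axis 0, axis 1 and axis 2 respectively
print("Volume shape:", images_AD.shape)
###Output
_____no_output_____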
###Markdown
Coronal plane (From top)
###Code
image_AD_slices = [images_AD[layer,:,:] for layer in range(0,images_AD.shape[0],SKIP_LAYERS)]
dplay.display_advanced_plot(image_AD_slices)
plt.suptitle("Coronal plane - AD")
image_CN_slices = [images_CN[layer,:,:] for layer in range(0,images_CN.shape[0],SKIP_LAYERS)]
dplay.display_advanced_plot(image_CN_slices)
plt.suptitle("Coronal plane - CN")
image_MCI_slices = [images_MCI[layer,:,:] for layer in range(0,images_MCI.shape[0],SKIP_LAYERS)]
dplay.display_advanced_plot(image_MCI_slices)
plt.suptitle("Coronal plane - MCI")
###Output
_____no_output_____
###Markdown
Sagittal plane (From front)
###Code
image_slices = [images_AD[:,layer,:] for layer in range(0,images_AD.shape[1], SKIP_LAYERS)]
dplay.display_advanced_plot(image_slices)
plt.suptitle("Sagittal plane")
###Output
_____no_output_____
###Markdown
Horizontal plane (from side)
###Code
image_slices = [images_AD[:,:,layer] for layer in range(0,images_AD.shape[2], SKIP_LAYERS)]
dplay.display_advanced_plot(image_slices)
plt.suptitle("Horisonal plane")
###Output
_____no_output_____ |
tutorials/3-IK-optimization.ipynb | ###Markdown
Inverse Kinematics OptimizationThe previous doc explained features and how they define objectives of a constrained optimization problem. Here we show how to use this to solve IK optimization problems. At the bottom there is more general text explaining the basic concepts. Demo of features in Inverse KinematicsLet's set up a standard configuration. (Lock the window with "Always on Top".)
###Code
import sys
sys.path.append('../build') #rai/lib')
import numpy as np
import libry as ry
C = ry.Config()
C.addFile('../rai-robotModels/pr2/pr2.g')
C.addFile('../rai-robotModels/objects/kitchen.g')
C.view()
###Output
_____no_output_____
###Markdown
For simplicity, let's add a frame that represents goals
###Code
goal = C.addFrame("goal")
goal.setShape(ry.ST.sphere, [.05])
goal.setColor([.5,1,1])
goal.setPosition([1,.5,1])
X0 = C.getFrameState() #store the initial configuration
###Output
_____no_output_____
###Markdown
We create an IK engine. The only objective is that the `positionDiff` (position difference in world coordinates) between `pr2L` (the yellow blob in the left hand) and `goal` is equal to zero:
###Code
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, times = [1,2], feature=ry.FS.positionDiff, frames=['pr2L', 'goal'])
###Output
_____no_output_____
###Markdown
We now call the optimizer (True means with random initialization/restart).
###Code
IK.optimize()
IK.getReport()
###Output
** KOMO::run solver:dense collisions:0 x-dim:25 T:1 k:1 phases:1 stepsPerPhase:1 tau:1 #timeSlices:2 #totalDOFs:25 #frames:358
** optimization time:0.00914103 (kin:0.000131 coll:0.000132 feat:0 newton: 0.00105) setJointStateCount:35
sos:0.0808073 ineq:0 eq:0.238354
###Markdown
The best way to retrieve the result is to copy the optimized IK configuration back into your working configuration C, which is now also displayed
###Code
#IK.getFrameState(1)
C.setFrameState(IK.getFrameState(0))
###Output
_____no_output_____
###Markdown
We can redo the optimization, but for a different configuration, namely a configuration where the goal is in another location.For this we move goal in our working configuration C, then copy C back into the IK engine's configurations:
###Code
## (iterate executing this cell for different goal locations!)
# move goal
goal.setPosition([.8,.2,.5])
# copy C into the IK's internal configuration(s)
IK.setConfigurations(C)
# reoptimize
IK.optimize(0.) # 0: no adding of noise for a random restart
#print(IK.getReport())
print(np.shape(IK.getFrameState(0)))
print(np.shape(IK.getFrameState(0)[1]))
# grab result
# C.setFrameState( IK.getConfiguration(1) )
C.setFrameState(IK.getFrameState(0))
###Output
** KOMO::run solver:dense collisions:0 x-dim:25 T:1 k:1 phases:1 stepsPerPhase:1 tau:1 #timeSlices:2 #totalDOFs:25 #frames:358
** optimization time:0.000305789 (kin:0.000238 coll:0.000149 feat:0 newton: 0.001415) setJointStateCount:3
sos:0.000285026 ineq:0 eq:0.0270084
(179, 7)
(7,)
###Markdown
Let's solve some other problems, always creating a novel IK engine:The relative position of `goal` in `pr2R` coordinates equals [0,0,-.2] (which is 20cm straight in front of the yellow blob)
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq,times=[1], feature=ry.FS.positionRel, frames=['goal','pr2R'], target=[0,0,-.2])
IK.optimize()
C.setFrameState(IK.getFrameState(0))
###Output
** KOMO::run solver:dense collisions:0 x-dim:25 T:1 k:1 phases:1 stepsPerPhase:1 tau:1 #timeSlices:2 #totalDOFs:25 #frames:358
** optimization time:0.00105824 (kin:5.2e-05 coll:1.1e-05 feat:0 newton: 0.000124) setJointStateCount:12
sos:0.00848536 ineq:0 eq:0.0341739
###Markdown
The distance between `pr2R` and `pr2L` is zero:
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, times=[1], feature=ry.FS.distance, frames=['pr2L','pr2R'])
IK.optimize()
C.setFrameState(IK.getFrameState(0))
###Output
** KOMO::run solver:dense collisions:0 x-dim:25 T:1 k:1 phases:1 stepsPerPhase:1 tau:1 #timeSlices:2 #totalDOFs:25 #frames:358
** optimization time:0.00069327 (kin:3.3e-05 coll:5e-06 feat:0 newton: 5.9e-05) setJointStateCount:6
sos:0.00209253 ineq:0 eq:0.0149894
###Markdown
The 3D difference between the z-vector of `pr2R` and the z-vector of `goal`:
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, times=[1], feature=ry.FS.vectorZDiff, frames=['pr2R', 'goal'])
IK.optimize()
C.setFrameState(IK.getFrameState(0))
###Output
** KOMO::run solver:dense collisions:0 x-dim:25 T:1 k:1 phases:1 stepsPerPhase:1 tau:1 #timeSlices:2 #totalDOFs:25 #frames:358
** optimization time:0.00144349 (kin:0.000111 coll:2.9e-05 feat:0 newton: 0.000115) setJointStateCount:12
sos:0.0163838 ineq:0 eq:0.0143332
###Markdown
The scalar product between the z-vector of `pr2R` and the z-vector of `goal` is zero:
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, times=[1], feature=ry.FS.scalarProductZZ, frames=['pr2R', 'goal'])
IK.optimize()
C.setFrameState(IK.getFrameState(0))
###Output
** KOMO::run solver:dense collisions:0 x-dim:25 T:1 k:1 phases:1 stepsPerPhase:1 tau:1 #timeSlices:2 #totalDOFs:25 #frames:358
** optimization time:0.000686185 (kin:7.1e-05 coll:3e-06 feat:0 newton: 4.2e-05) setJointStateCount:4
sos:0.000248896 ineq:0 eq:0.00308733
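###Markdown
 The notes below describe the `target` and `scale` modifiers in more detail; as a quick illustration, both can be combined in a single objective (a hypothetical variation of the cells above, reusing the same API):
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
# Ask for the hand to sit 10cm above the goal, with a 10x stronger weighting
IK.addObjective(type=ry.OT.eq, times=[1], feature=ry.FS.positionDiff, frames=['pr2L', 'goal'], target=[0, 0, .1], scale=[1e1])
IK.optimize()
C.setFrameState(IK.getFrameState(0))
###Output
_____no_output_____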
###Markdown
etc etc

More explanations: All methods to compute paths or configurations solve constrained optimization problems. To use them, you need to learn to define constrained optimization problems. Some definitions:

* An objective defines either a sum-of-squares cost term, an equality constraint, or an inequality constraint in the optimization problem. An objective is defined by its type and its feature. The type can be `sos` (sum-of-squares), `eq`, or `ineq`, referring to the three cases.
* A feature is a differentiable mapping from the decision variable (the full path, or robot configuration) to a feature space. If the feature space is, e.g., 3-dimensional, this defines 3 sum-of-squares terms, or 3 inequality, or 3 equality constraints, one for each dimension. For instance, the feature can be the 3-dim robot hand position in the 15th time slice of a path optimization problem. If you put an equality constraint on this feature, this adds 3 equality constraints to the overall path optimization problem.
* A feature is defined by the keyword for the feature map (e.g., `pos` for position), typically by a set of frame names that tell which objects we refer to (e.g., `pr2L` for the left hand of the pr2), optionally some modifiers (e.g., a scale or target, which linearly transform the feature map), and the set of configuration IDs or time slices the feature is to be computed from (e.g., `confs=[15]` if the feature is to be computed from the 15th time slice of a path optimization problem).
* In path optimization problems, we often want to add objectives for a whole time interval rather than for a single time slice or specific configuration, e.g., to avoid collisions from start to end. When adding objectives to the optimization problem we can specify whole intervals, with `times=[1., 2.]`, so that the objective is added to each configuration in this time interval.
* Some features, especially velocity and acceleration, refer to a tuple of (consecutive) configurations. E.g., when you impose an acceleration feature, you need to specify `confs=[13, 14, 15]`. Or, if you use `times=[1.,1.]`, the acceleration feature is computed from the configuration that corresponds to time=1 and the two configurations *before* it.
* All kinematic feature maps (those that depend on only one configuration) can be modified to become velocity or acceleration features. E.g., the position feature map can be modified to become a velocity or acceleration feature.
* The `sos`, `eq`, and `ineq` types always constrain the feature map to be *zero*, e.g., `eq` constrains all feature dimensions to equal zero. This is natural for many features, especially those that refer to differences (e.g., `posDiff` or `posRel`, which compute the relative position between two frames). However, when one aims to constrain the feature to a non-zero constant value, one can modify the objective with a `target` specification.
* Finally, all features can be rescaled with a `scale` specification. Rescaling changes the costs that arise from `sos` objectives, and it also has a significant impact on the convergence behavior of `eq` and `ineq` constraints. As a guide: scale constraints so that, if they *were* treated as squared penalties (squaredPenalty optimization mode, to be made accessible), convergence to reasonable approximate solutions would be efficient; the Augmented Lagrangian (AugLag) method will then also converge efficiently to precise constraints.
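As a minimal sketch of how these pieces combine in code, using only `addObjective` arguments that already appear in this notebook (type, feature, frames, target, and scale):
###Code
# Sketch: one objective per concept. Assumes C is the ry.Config set up above,
# with frames 'pr2L', 'pr2R', and 'goal' defined.
IK = C.komo_IK(False)
# eq with the default (zero) target: constrain the 3-dim positionDiff feature to zero
IK.addObjective(type=ry.OT.eq, feature=ry.FS.positionDiff, frames=['pr2L', 'goal'])
# eq with a target: constrain the feature to a non-zero constant instead of zero
IK.addObjective(type=ry.OT.eq, feature=ry.FS.positionRel, frames=['goal', 'pr2R'], target=[0, 0, -.2])
# sos with a scale: a down-weighted sum-of-squares cost term rather than a hard constraint
IK.addObjective(type=ry.OT.sos, feature=ry.FS.scalarProductZZ, frames=['pr2L', 'goal'], scale=[0.1])
IK.optimize()
###Output
_____no_output_____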
###Code
# Designing a cylinder grasp
D=0
C=0
import sys
sys.path.append('../build') #rai/lib')
import numpy as np
import libry as ry
C = ry.Config()
C.addFile('../rai-robotModels/pr2/pr2.g')
C.addFile('../rai-robotModels/objects/kitchen.g')
C.view()
C.setJointState([.7], ["l_gripper_l_finger_joint"])
C.setJointState( C.getJointState() )
goal = C.addFrame("goal")
goal.setShape(ry.ST.cylinder, [0,0,.2, .03])
goal.setColor([.5,1,1])
goal.setPosition([1.81,.5,1])
X0 = C.getFrameState()
C.setFrameState(X0)
goal.setPosition([1.81,.5,1])
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, times=[1],feature=ry.FS.positionDiff, frames=['pr2L', 'goal'], scale=[[1,0,0],[0,1,0]])
IK.addObjective(type=ry.OT.ineq, times=[1], feature=ry.FS.positionDiff, frames=['pr2L', 'goal'], scale=[[0,0,1]], target=[0,0,.0005])
IK.addObjective(type=ry.OT.ineq, times=[1], feature=ry.FS.positionDiff, frames=['pr2L', 'goal'], scale=[[0,0,-1]], target=[0,0,-.0005])
IK.addObjective(type=ry.OT.sos, times=[1], feature=ry.FS.scalarProductZZ, frames=['pr2L', 'goal'], scale=[0.1])
IK.addObjective(type=ry.OT.eq, times=[1], feature=ry.FS.scalarProductXZ, frames=['pr2L', 'goal'])
IK.optimize()
C.setFrameState(IK.getFrameState(0))
IK.getReport()
IK.view()
###Output
_____no_output_____
###Markdown
Inverse Kinematics Optimization: The previous doc explained features and how they define objectives of a constrained optimization problem. Here we show how to use this to solve IK optimization problems. At the bottom there is more general text explaining the basic concepts. Demo of features in Inverse Kinematics: Let's set up a standard configuration. (Lock the window with "Always on Top".)
###Code
import sys
sys.path += ['../build', '../../../build', '../../lib']
import numpy as np
import libry as ry
C = ry.Config()
C.addFile('../rai-robotModels/pr2/pr2.g')
C.addFile('../rai-robotModels/objects/kitchen.g')
C.view()
###Output
_____no_output_____
###Markdown
For simplicity, let's add a frame that represents goals
###Code
goal = C.addFrame("goal")
goal.setShape(ry.ST.sphere, [.05])
goal.setColor([.5,1,1])
goal.setPosition([1,.5,1])
X0 = C.getFrameState() #store the initial configuration
###Output
_____no_output_____
###Markdown
We create an IK engine. The only objective is that the `positionDiff` (position difference in world coordinates) between `pr2L` (the yellow blob in the left hand) and `goal` is equal to zero:
###Code
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, feature=ry.FS.positionDiff, frames=['pr2L', 'goal'])
###Output
_____no_output_____
###Markdown
We now call the optimizer (True means with random initialization/restart).
###Code
IK.optimize()
IK.getReport()
###Output
_____no_output_____
###Markdown
The best way to retrieve the result is to copy the optimized IK configuration back into your working configuration C, which is now also displayed
###Code
C.setFrameState( IK.getConfiguration(0) )
###Output
_____no_output_____
###Markdown
We can redo the optimization, but for a different configuration, namely one where the goal is in another location. For this, we move the goal in our working configuration C, then copy C back into the IK engine's configurations:
###Code
## (iterate executing this cell for different goal locations!)
# move goal
goal.setPosition([.8,.2,.5])
# copy C into the IK's internal configuration(s)
IK.setConfigurations(C)
# reoptimize
IK.optimize(0.) # 0: no adding of noise for a random restart
print(IK.getReport())
# grab result
C.setFrameState( IK.getConfiguration(1) )
###Output
_____no_output_____
###Markdown
Let's solve some other problems, always creating a novel IK engine. The relative position of `goal` in `pr2R` coordinates equals [0,0,-.2] (which is 20 cm straight in front of the yellow blob):
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, feature=ry.FS.positionRel, frames=['goal','pr2R'], target=[0,0,-.2])
IK.optimize()
C.setFrameState( IK.getConfiguration(0) )
###Output
_____no_output_____
###Markdown
The distance between `pr2R` and `pr2L` is zero:
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, feature=ry.FS.distance, frames=['pr2L','pr2R'])
IK.optimize()
C.setFrameState( IK.getConfiguration(0) )
###Output
_____no_output_____
###Markdown
The 3D difference between the z-vector of `pr2R` and the z-vector of `goal`:
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, feature=ry.FS.vectorZDiff, frames=['pr2R', 'goal'])
IK.optimize()
C.setFrameState( IK.getConfiguration(0) )
###Output
_____no_output_____
###Markdown
The scalar product between the z-vector of `pr2R` and the z-vector of `goal` is zero:
###Code
C.setFrameState(X0)
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, feature=ry.FS.scalarProductZZ, frames=['pr2R', 'goal'])
IK.optimize()
C.setFrameState( IK.getConfiguration(0) )
###Output
_____no_output_____
###Markdown
etc etc

More explanations: All methods to compute paths or configurations solve constrained optimization problems. To use them, you need to learn to define constrained optimization problems. Some definitions:

* An objective defines either a sum-of-squares cost term, an equality constraint, or an inequality constraint in the optimization problem. An objective is defined by its type and its feature. The type can be `sos` (sum-of-squares), `eq`, or `ineq`, referring to the three cases.
* A feature is a differentiable mapping from the decision variable (the full path, or robot configuration) to a feature space. If the feature space is, e.g., 3-dimensional, this defines 3 sum-of-squares terms, or 3 inequality, or 3 equality constraints, one for each dimension. For instance, the feature can be the 3-dim robot hand position in the 15th time slice of a path optimization problem. If you put an equality constraint on this feature, this adds 3 equality constraints to the overall path optimization problem.
* A feature is defined by the keyword for the feature map (e.g., `pos` for position), typically by a set of frame names that tell which objects we refer to (e.g., `pr2L` for the left hand of the pr2), optionally some modifiers (e.g., a scale or target, which linearly transform the feature map), and the set of configuration IDs or time slices the feature is to be computed from (e.g., `confs=[15]` if the feature is to be computed from the 15th time slice of a path optimization problem).
* In path optimization problems, we often want to add objectives for a whole time interval rather than for a single time slice or specific configuration, e.g., to avoid collisions from start to end. When adding objectives to the optimization problem we can specify whole intervals, with `times=[1., 2.]`, so that the objective is added to each configuration in this time interval.
* Some features, especially velocity and acceleration, refer to a tuple of (consecutive) configurations. E.g., when you impose an acceleration feature, you need to specify `confs=[13, 14, 15]`. Or, if you use `times=[1.,1.]`, the acceleration feature is computed from the configuration that corresponds to time=1 and the two configurations *before* it.
* All kinematic feature maps (those that depend on only one configuration) can be modified to become velocity or acceleration features. E.g., the position feature map can be modified to become a velocity or acceleration feature.
* The `sos`, `eq`, and `ineq` types always constrain the feature map to be *zero*, e.g., `eq` constrains all feature dimensions to equal zero. This is natural for many features, especially those that refer to differences (e.g., `posDiff` or `posRel`, which compute the relative position between two frames). However, when one aims to constrain the feature to a non-zero constant value, one can modify the objective with a `target` specification.
* Finally, all features can be rescaled with a `scale` specification. Rescaling changes the costs that arise from `sos` objectives, and it also has a significant impact on the convergence behavior of `eq` and `ineq` constraints. As a guide: scale constraints so that, if they *were* treated as squared penalties (squaredPenalty optimization mode, to be made accessible), convergence to reasonable approximate solutions would be efficient; the Augmented Lagrangian (AugLag) method will then also converge efficiently to precise constraints.
###Code
# Designing a cylinder grasp
D=0
C=0
import sys
sys.path += ['../build', '../../../build', '../../lib']
import numpy as np
import libry as ry
C = ry.Config()
C.addFile('../rai-robotModels/pr2/pr2.g')
C.addFile('../rai-robotModels/objects/kitchen.g')
C.view()
C.setJointState([.7], ["l_gripper_l_finger_joint"])
C.setJointState( C.getJointState() )
goal = C.addFrame("goal")
goal.setShape(ry.ST.cylinder, [0,0,.2, .03])
goal.setColor([.5,1,1])
goal.setPosition([1,.5,1])
X0 = C.getFrameState()
C.setFrameState(X0)
goal.setPosition([1,.5,1.2])
IK = C.komo_IK(False)
IK.addObjective(type=ry.OT.eq, feature=ry.FS.positionDiff, frames=['pr2L', 'goal'], scaleTrans=[[1,0,0],[0,1,0]])
IK.addObjective(type=ry.OT.ineq, feature=ry.FS.positionDiff, frames=['pr2L', 'goal'], scaleTrans=[[0,0,1]], target=[0,0,.05])
IK.addObjective(type=ry.OT.ineq, feature=ry.FS.positionDiff, frames=['pr2L', 'goal'], scaleTrans=[[0,0,-1]], target=[0,0,-.05])
IK.addObjective(type=ry.OT.sos, feature=ry.FS.scalarProductZZ, frames=['pr2L', 'goal'], scale=[0.1])
IK.addObjective(type=ry.OT.eq, feature=ry.FS.scalarProductXZ, frames=['pr2L', 'goal'])
IK.optimize()
C.setFrameState( IK.getConfiguration(0) )
IK.getReport()
IK.view()
###Output
_____no_output_____
notebooks/03.00-Widget_Basics.ipynb | ###Markdown
Simple Widget Introduction What are widgets? Widgets are eventful Python objects that have a representation in the browser, often as a control like a slider, textbox, etc. What can they be used for? You can use widgets to build **interactive GUIs** for your notebooks. You can also use widgets to **synchronize stateful and stateless information** between Python and JavaScript. Using widgets To use the widget framework, you need to import `ipywidgets`.
###Code
import ipywidgets as widgets
###Output
_____no_output_____
###Markdown
repr Widgets have their own display `repr` which allows them to be displayed using IPython's display framework. Constructing and returning an `IntSlider` automatically displays the widget (as seen below). Widgets are displayed inside the output area below the code cell. Clearing cell output will also remove the widget.
###Code
widgets.IntSlider()
###Output
_____no_output_____
###Markdown
display() You can also explicitly display the widget using `display(...)`.
###Code
from IPython.display import display
w = widgets.IntSlider()
display(w)
###Output
_____no_output_____
###Markdown
Multiple display() calls If you display the same widget twice, the displayed instances in the front-end will remain in sync with each other. Try dragging the slider below and watch the slider above.
###Code
display(w)
###Output
_____no_output_____
###Markdown
Why does displaying the same widget twice work? Widgets are represented in the back-end by a single object. Each time a widget is displayed, a new representation of that same object is created in the front-end. These representations are called views. Widget properties All of the IPython widgets share a similar naming scheme. To read the value of a widget, you can query its `value` property.
###Code
w = widgets.IntSlider()
display(w)
w.value
###Output
_____no_output_____
###Markdown
Similarly, to set a widget's value, you can set its `value` property.
###Code
w.value = 100
###Output
_____no_output_____
###Markdown
Keys In addition to `value`, most widgets share `keys`, `description`, and `disabled`. To see the entire list of synchronized, stateful properties of any specific widget, you can query the `keys` property. Generally you should not interact with properties starting with an underscore.
###Code
w.keys
###Output
_____no_output_____
###Markdown
Shorthand for setting the initial values of widget properties While creating a widget, you can set some or all of the initial values of that widget by defining them as keyword arguments in the widget's constructor (as seen below).
###Code
widgets.Text(value='Hello World!', disabled=True)
###Output
_____no_output_____
###Markdown
Linking two similar widgets If you need to display the same value two different ways, you'll have to use two different widgets. Instead of attempting to manually synchronize the values of the two widgets, you can use the `link` or `jslink` function to link two properties together (the difference between these is discussed in [Widget Events](08.00-Widget_Events.ipynb)). Below, the values of two widgets are linked together.
###Code
a = widgets.FloatText()
b = widgets.FloatSlider()
display(a,b)
mylink = widgets.link((a, 'value'), (b, 'value'))
###Output
_____no_output_____
###Markdown
Unlinking widgets Unlinking the widgets is simple. All you have to do is call `.unlink` on the link object. Try changing one of the widgets above after unlinking to see that they can be independently changed.
###Code
# mylink.unlink()
###Output
_____no_output_____
###Markdown
`observe` changes in a widget value: Almost every widget can be observed for changes in its value that trigger a call to a function. The example below is the slider from the first notebook of the tutorial. The `HTML` widget below the slider displays the square of the number.
###Code
slider = widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Input:',
)
# Create non-editable text area to display square of value
square_display = widgets.HTML(description="Square: ", value='{}'.format(slider.value**2))
# Create function to update square_display's value when slider changes
def update_square_display(change):
square_display.value = '{}'.format(change.new**2)
slider.observe(update_square_display, names='value')
# Put them in a vertical box
widgets.VBox([slider, square_display])
###Output
_____no_output_____
###Markdown
Simple Widget Introduction What are widgets? Widgets are eventful Python objects that have a representation in the browser, often as a control like a slider, textbox, etc. What can they be used for? You can use widgets to build **interactive GUIs** for your notebooks. You can also use widgets to **synchronize stateful and stateless information** between Python and JavaScript. Using widgets To use the widget framework, you need to import `ipywidgets`.
###Code
import ipywidgets as widgets
###Output
_____no_output_____
###Markdown
repr Widgets have their own display `repr` which allows them to be displayed using IPython's display framework. Constructing and returning an `IntSlider` automatically displays the widget (as seen below). Widgets are displayed inside the output area below the code cell. Clearing cell output will also remove the widget.
###Code
widgets.IntSlider()
###Output
_____no_output_____
###Markdown
display() You can also explicitly display the widget using `display(...)`.
###Code
from IPython.display import display
w = widgets.IntSlider()
display(w)
###Output
_____no_output_____
###Markdown
Multiple display() calls If you display the same widget twice, the displayed instances in the front-end will remain in sync with each other. Try dragging the slider below and watch the slider above.
###Code
display(w)
###Output
_____no_output_____
###Markdown
Why does displaying the same widget twice work? Widgets are represented in the back-end by a single object. Each time a widget is displayed, a new representation of that same object is created in the front-end. These representations are called views. Closing widgets You can close a widget by calling its `close()` method.
###Code
display(w)
w.close()
###Output
_____no_output_____
###Markdown
Widget properties All of the IPython widgets share a similar naming scheme. To read the value of a widget, you can query its `value` property.
###Code
w = widgets.IntSlider()
display(w)
w.value
###Output
_____no_output_____
###Markdown
Similarly, to set a widget's value, you can set its `value` property.
###Code
w.value = 100
###Output
_____no_output_____
###Markdown
Keys In addition to `value`, most widgets share `keys`, `description`, and `disabled`. To see the entire list of synchronized, stateful properties of any specific widget, you can query the `keys` property.
###Code
w.keys
###Output
_____no_output_____
###Markdown
Shorthand for setting the initial values of widget properties While creating a widget, you can set some or all of the initial values of that widget by defining them as keyword arguments in the widget's constructor (as seen below).
###Code
widgets.Text(value='Hello World!', disabled=True)
###Output
_____no_output_____
###Markdown
Linking two similar widgets If you need to display the same value two different ways, you'll have to use two different widgets. Instead of attempting to manually synchronize the values of the two widgets, you can use the `link` or `jslink` function to link two properties together (the difference between these is discussed in [Widget Events](08.00-Widget_Events.ipynb)). Below, the values of two widgets are linked together.
###Code
a = widgets.FloatText()
b = widgets.FloatSlider()
display(a,b)
mylink = widgets.jslink((a, 'value'), (b, 'value'))
###Output
_____no_output_____
###Markdown
Unlinking widgets Unlinking the widgets is simple. All you have to do is call `.unlink` on the link object. Try changing one of the widgets above after unlinking to see that they can be independently changed.
###Code
# mylink.unlink()
###Output
_____no_output_____
###Markdown
Simple Widget Introduction What are widgets? Widgets are eventful Python objects that have a representation in the browser, often as a control like a slider, textbox, etc. What can they be used for? You can use widgets to build **interactive GUIs** for your notebooks. You can also use widgets to **synchronize stateful and stateless information** between Python and JavaScript. Using widgets To use the widget framework, you need to import `ipywidgets`.
###Code
import ipywidgets as widgets
###Output
_____no_output_____
###Markdown
repr Widgets have their own display `repr` which allows them to be displayed using IPython's display framework. Constructing and returning an `IntSlider` automatically displays the widget (as seen below). Widgets are displayed inside the output area below the code cell. Clearing cell output will also remove the widget.
###Code
widgets.IntSlider()
###Output
_____no_output_____
###Markdown
display() You can also explicitly display the widget using `display(...)`.
###Code
from IPython.display import display
w = widgets.IntSlider()
display(w)
###Output
_____no_output_____
###Markdown
Multiple display() calls If you display the same widget twice, the displayed instances in the front-end will remain in sync with each other. Try dragging the slider below and watch the slider above.
###Code
display(w)
###Output
_____no_output_____
###Markdown
Why does displaying the same widget twice work? Widgets are represented in the back-end by a single object. Each time a widget is displayed, a new representation of that same object is created in the front-end. These representations are called views. Widget properties All of the IPython widgets share a similar naming scheme. To read the value of a widget, you can query its `value` property.
###Code
w = widgets.IntSlider()
display(w)
w.value
###Output
_____no_output_____
###Markdown
Similarly, to set a widget's value, you can set its `value` property.
###Code
w.value = 100
###Output
_____no_output_____
###Markdown
Keys In addition to `value`, most widgets share `keys`, `description`, and `disabled`. To see the entire list of synchronized, stateful properties of any specific widget, you can query the `keys` property. Generally you should not interact with properties starting with an underscore.
###Code
w.keys
w.max
###Output
_____no_output_____
###Markdown
Shorthand for setting the initial values of widget properties While creating a widget, you can set some or all of the initial values of that widget by defining them as keyword arguments in the widget's constructor (as seen below).
###Code
widgets.Text(value='Hello World!', disabled=True)
wid = widgets.Text()
wid.value = "hello"
wid.disabled = True
wid
###Output
_____no_output_____
###Markdown
Linking two similar widgets If you need to display the same value two different ways, you'll have to use two different widgets. Instead of attempting to manually synchronize the values of the two widgets, you can use the `link` or `jslink` function to link two properties together (the difference between these is discussed in [Widget Events](08.00-Widget_Events.ipynb)). Below, the values of two widgets are linked together.
###Code
a = widgets.FloatText()
b = widgets.FloatSlider()
display(a,b)
mylink = widgets.link((a, 'value'), (b, 'value'))
###Output
_____no_output_____
###Markdown
Unlinking widgets Unlinking the widgets is simple. All you have to do is call `.unlink` on the link object. Try changing one of the widgets above after unlinking to see that they can be independently changed.
###Code
mylink.unlink()
###Output
_____no_output_____
###Markdown
`observe` changes in a widget value: Almost every widget can be observed for changes in its value that trigger a call to a function. The example below is the slider from the first notebook of the tutorial. The `HTML` widget below the slider displays the square of the number.
###Code
slider = widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Input:',
)
# Create non-editable text area to display square of value
square_display = widgets.HTML(description="Square: ", value='{}'.format(slider.value**2))
# Create function to update square_display's value when slider changes
def update_square_display(change):
square_display.value = '{}'.format(change.new**2)
slider.observe(update_square_display, names='value')
# Put them in a vertical box
widgets.VBox([slider, square_display])
###Output
_____no_output_____
###Markdown
Modified to display the square with two decimal places...
###Code
slider = widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Input:',
)
# Create non-editable text area to display square of value
square_display = widgets.HTML(description="Square: ", value='{:6.2f}'.format(slider.value**2))
# Create function to update square_display's value when slider changes
def update_square_display(change):
square_display.value = '{:6.2f}'.format(change.new**2)
slider.observe(update_square_display, names='value')
# Put them in a vertical box
widgets.VBox([slider, square_display])
###Output
_____no_output_____ |
Assignment 3_Tensorflow/CIFAR_10Xavier_initializer.ipynb | ###Markdown
ASSIGNMENT 3: Using TensorFlow to build a CNN for the CIFAR-10 dataset. Each record is of size 1x3072. We build a CNN to classify the data into the 10 classes. Dataset: The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. http://www.cs.utoronto.ca/~kriz/cifar.html
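Each 1x3072 record stores the image as three flattened 32x32 colour planes (1024 red, then 1024 green, then 1024 blue values). A small sketch of that layout, using a synthetic record (the `record` array below is hypothetical, not part of the dataset):
###Code
import numpy as np

# hypothetical 1x3072 record: three flattened 32x32 colour planes
record = np.arange(3072, dtype=np.uint8)
r, g, b = record[:1024], record[1024:2048], record[2048:]
# stack the planes as channels and reshape to an image, as done for X_tr below
image = np.dstack((r, g, b)).reshape(32, 32, 3)
print(image.shape)  # (32, 32, 3)
###Output
_____no_output_____
###Markdown
Installing pydrive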
###Code
!pip install pydrive
###Output
Collecting pydrive
  Downloading https://files.pythonhosted.org/packages/52/e0/0e64788e5dd58ce2d6934549676243dc69d982f198524be9b99e9c2a4fd5/PyDrive-1.3.1.tar.gz (987kB)
    100% |████████████████████████████████| 993kB 19.8MB/s
[?25hRequirement already satisfied: google-api-python-client>=1.2 in /usr/local/lib/python3.6/dist-packages (from pydrive) (1.6.7)
Requirement already satisfied: oauth2client>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from pydrive) (4.1.3)
Requirement already satisfied: PyYAML>=3.0 in /usr/local/lib/python3.6/dist-packages (from pydrive) (3.13)
Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.2->pydrive) (3.0.0)
Requirement already satisfied: six<2dev,>=1.6.1 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.2->pydrive) (1.11.0)
Requirement already satisfied: httplib2<1dev,>=0.9.2 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.2->pydrive) (0.11.3)
Requirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.2.4)
Requirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (0.4.5)
Requirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from oauth2client>=4.0.0->pydrive) (4.0)
Building wheels for collected packages: pydrive
  Building wheel for pydrive (setup.py) ... done
  Stored in directory: /root/.cache/pip/wheels/fa/d2/9a/d3b6b506c2da98289e5d417215ce34b696db856643bad779f4
Successfully built pydrive
Installing collected packages: pydrive
Successfully installed pydrive-1.3.1
###Markdown
Importing the modules needed to create the connection
###Code
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
import tensorflow as tf
from oauth2client.client import GoogleCredentials
###Output
_____no_output_____
###Markdown
Authenticating and creating the PyDrive client
###Code
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
###Output
_____no_output_____
###Markdown
Getting the IDs of all the files in the folder
###Code
file_list = drive.ListFile({'q': "'1DCFFw2O6BFq8Gk0eYu7JT4Qn224BNoCt' in parents and trashed=false"}).GetList()
for file1 in file_list:
print('title: %s, id: %s' % (file1['title'], file1['id']))
###Output
title: data_batch_1, id: 11Bo2ULl9_aOQ761ONc2vhepnydriELiT
title: data_batch_2, id: 1asFrGiOMdHKY-_KO94e1fLWMBN_Ke92I
title: test_batch, id: 1Wyz_RdmoLe9r9t1rloap8AttSltmfwrp
title: data_batch_3, id: 11ky6i6FSTGWJYOzXquELD4H-GUr49C4f
title: data_batch_5, id: 1rmRytfjJWua0cv17DzST6PqoDFY2APa6
title: data_batch_4, id: 1bb6TRjqNY5A0FsD_P7s3ssepMGWNW-Eh
###Markdown
Importing libraries
###Code
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import display
from sklearn.model_selection import train_test_split
import pickle
%matplotlib inline
###Output
_____no_output_____
###Markdown
Loading the data
###Code
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
###Output
_____no_output_____
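###Markdown
Each unpickled batch is a dict; the keys used below are `b'data'` (a 10000x3072 uint8 array) and `b'labels'` (a list of 10000 class indices). A quick inspection sketch (commented out, since the batch files are only downloaded in the next cell):
###Code
# batch = unpickle('data_batch_1')
# print(batch.keys())          # e.g. dict_keys([b'batch_label', b'labels', b'data', b'filenames'])
# print(batch[b'data'].shape)  # (10000, 3072)
###Output
_____no_output_____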
###Markdown
Downloading the batch files from Drive and unpickling them
###Code
zip_file = drive.CreateFile({'id': '11Bo2ULl9_aOQ761ONc2vhepnydriELiT'})
zip_file.GetContentFile('data_batch_1')
zip_file = drive.CreateFile({'id': '1asFrGiOMdHKY-_KO94e1fLWMBN_Ke92I'})
zip_file.GetContentFile('data_batch_2')
zip_file = drive.CreateFile({'id': '11ky6i6FSTGWJYOzXquELD4H-GUr49C4f'})
zip_file.GetContentFile('data_batch_3')
zip_file = drive.CreateFile({'id': '1bb6TRjqNY5A0FsD_P7s3ssepMGWNW-Eh'})
zip_file.GetContentFile('data_batch_4')
zip_file = drive.CreateFile({'id': '1rmRytfjJWua0cv17DzST6PqoDFY2APa6'})
zip_file.GetContentFile('data_batch_5')
zip_file = drive.CreateFile({'id': '1Wyz_RdmoLe9r9t1rloap8AttSltmfwrp'})
zip_file.GetContentFile('test_batch')
data1 = unpickle("data_batch_1")
data2 = unpickle("data_batch_2")
data3 = unpickle("data_batch_3")
data4 = unpickle("data_batch_4")
data5 = unpickle("data_batch_5")
#label_data = unpickle('../input/batches.meta')[b'label_names']
labels1 = data1[b'labels']
data1 = data1[b'data'] * 1.0
labels2 = data2[b'labels']
data2 = data2[b'data'] * 1.0
labels3 = data3[b'labels']
data3 = data3[b'data'] * 1.0
labels4 = data4[b'labels']
data4 = data4[b'data'] * 1.0
labels5 = data5[b'labels']
data5 = data5[b'data'] * 1.0
###Output
_____no_output_____
###Markdown
Combine the five training batches into a single training set
###Code
X_tr = np.concatenate([data1, data2, data3, data4, data5], axis=0)
# stack the red/green/blue planes as channels, then centre and scale to roughly [-0.5, 0.5]
X_tr = np.dstack((X_tr[:, :1024], X_tr[:, 1024:2048], X_tr[:, 2048:])) / 1.0
X_tr = (X_tr - 128) / 255.0
# reshape each record into a 32x32 RGB image
X_tr = X_tr.reshape(-1, 32, 32, 3)
y_tr = np.concatenate([labels1, labels2, labels3, labels4, labels5], axis=0)
###Output
_____no_output_____
###Markdown
Setting the number of classes
###Code
num_classes = len(np.unique(y_tr))
print("X_tr", X_tr.shape)
print("y_tr", y_tr.shape)
###Output
X_tr (50000, 32, 32, 3)
y_tr (50000,)
###Markdown
Importing the test data
###Code
test_data = unpickle("test_batch")
X_test = test_data[b'data']
X_test = np.dstack((X_test[:, :1024], X_test[:, 1024:2048], X_test[:, 2048:])) / 1.0
X_test = (X_test - 128) / 255.0
X_test = X_test.reshape(-1, 32, 32, 3)
y_test = np.asarray(test_data[b'labels'])
###Output
_____no_output_____
###Markdown
Splitting into test and validation
###Code
X_te, X_cv, y_te, y_cv = train_test_split(X_test, y_test, test_size=0.5, random_state=1)
print("X_te", X_te.shape)
print("X_cv", X_cv.shape)
print("y_te", y_te.shape)
print("y_cv", y_cv.shape)
###Output
X_te (5000, 32, 32, 3)
X_cv (5000, 32, 32, 3)
y_te (5000,)
y_cv (5000,)
###Markdown
Batch generator
###Code
def get_batches(X, y, batch_size, crop=False, distort=True):
# Shuffle X,y
shuffled_idx = np.arange(len(y))
np.random.shuffle(shuffled_idx)
    n, h, w, c = X.shape  # image count, height, width, channels (avoids shadowing the loop index below)
# Enumerate indexes by steps of batch_size
for i in range(0, len(y), batch_size):
batch_idx = shuffled_idx[i:i+batch_size]
X_return = X[batch_idx]
# optional random crop of images
if crop:
woff = (w - 24) // 4
hoff = (h - 24) // 4
startw = np.random.randint(low=woff,high=woff*2)
starth = np.random.randint(low=hoff,high=hoff*2)
X_return = X_return[:,startw:startw+24,starth:starth+24,:]
# do random flipping of images
coin = np.random.binomial(1, 0.5, size=None)
if coin and distort:
X_return = X_return[...,::-1,:]
yield X_return, y[batch_idx]
###Output
_____no_output_____
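###Markdown
A quick usage sketch of the generator (shapes only; with `crop=True` the images would come out 24x24):
###Code
# draw a single shuffled batch from the training set
for X_batch, y_batch in get_batches(X_tr, y_tr, batch_size=128, crop=False, distort=True):
    print(X_batch.shape, y_batch.shape)  # (128, 32, 32, 3) (128,)
    break
###Output
_____no_output_____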
###Markdown
Configurations
###Code
epochs = 20 # how many epochs
batch_size = 128
steps_per_epoch = X_tr.shape[0] / batch_size
###Output
_____no_output_____
###Markdown
Building the network MODEL 7.13.4.6.7fModel description:* 7.6 - changed kernel reg rate to 0.01 from 0.1* 7.7 - optimize loss instead of ce 7.8 - remove redundant lambda, replaced scale in regularizer with lambda, changed lambda from 0.01 to 0.001* 7.9 - lambda 0 instead of 3* 7.9.1 - lambda 1 instead of 0* 7.9.2 - use lambda 2 instead of 1* 7.9.4f - use 3x3 pooling instead of 2x2* 7.11.6f - add batch norm after conv 5* 7.11.2f - raise lambda, add dropout after fc2* 7.12.2f - change fully connected dropout to 20%* 7.12.2.2g - change fc dropout to 25%, increase filters in last 2 conv layers to 192 from 128* 7.13.2.2f - change all pool sizes to 2x2 from 3x37.13.3.6f - use different lambda for conv + fc layers
###Code
# Create new graph
graph = tf.Graph()
# whether to retrain model from scratch or use saved model
init = True
model_name = "model_7.13.4.7.7l"
with graph.as_default():
# Placeholders
X = tf.placeholder(dtype=tf.float32, shape=[None, 32, 32, 3])
y = tf.placeholder(dtype=tf.int32, shape=[None])
training = tf.placeholder(dtype=tf.bool)
# create global step for decaying learning rate
global_step = tf.Variable(0, trainable=False)
    # regularization strengths (lambda) for the conv and fc layers
lamC = 0.000050
lamF = 0.0025000
    # learning rate schedule
epochs_per_decay = 10
starting_rate = 0.003
decay_factor = 0.9
staircase = True
    learning_rate = tf.train.exponential_decay(starting_rate,  # start at 0.003
                                               global_step,
                                               steps_per_epoch * epochs_per_decay,  # decay every 10 epochs
                                               decay_factor,  # multiply the rate by 0.9 at each decay
                                               staircase=staircase)
# Small epsilon value for the BN transform
epsilon = 1e-3
with tf.name_scope('conv1') as scope:
# Convolutional layer 1
conv1 = tf.layers.conv2d(
X, # Input data
filters=64, # 64 filters
kernel_size=(5, 5), # Kernel size: 5x5
strides=(1, 1), # Stride: 1
padding='SAME', # "same" padding
activation=None, # None
kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=10),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamC),
name='conv1'
)
# try batch normalization
bn1 = tf.layers.batch_normalization(
conv1,
axis=-1,
momentum=0.99,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn1'
)
#apply relu
conv1_bn_relu = tf.nn.relu(bn1, name='relu1')
conv1_bn_relu = tf.layers.dropout(conv1_bn_relu, rate=0.1, seed=9, training=training)
with tf.name_scope('conv2') as scope:
# Convolutional layer 2
conv2 = tf.layers.conv2d(
conv1_bn_relu, # Input data
filters=64, # 64 filters
kernel_size=(5, 5), # Kernel size: 5x5
strides=(1, 1), # Stride: 1
padding='SAME', # "same" padding
activation=None, # None
kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=8),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamC),
name='conv2' # Add name
)
# try batch normalization
bn2 = tf.layers.batch_normalization(
conv2,
axis=-1,
momentum=0.9,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn2'
)
#apply relu
conv2_bn_relu = tf.nn.relu(bn2, name='relu2')
with tf.name_scope('pool1') as scope:
# Max pooling layer 1
pool1 = tf.layers.max_pooling2d(
conv2_bn_relu, # Input
pool_size=(2, 2), # Pool size: 2x2
strides=(2, 2), # Stride: 2
padding='SAME', # "same" padding
name='pool1'
)
# dropout at 10%
pool1 = tf.layers.dropout(pool1, rate=0.1, seed=1, training=training)
with tf.name_scope('conv3') as scope:
# Convolutional layer 3
conv3= tf.layers.conv2d(
pool1, # Input
filters=96, # 96 filters
kernel_size=(4, 4), # Kernel size: 4x4
strides=(1, 1), # Stride: 1
padding='SAME', # "same" padding
activation=None, # None
kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=7),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamC),
name='conv3'
)
bn3 = tf.layers.batch_normalization(
conv3,
axis=-1,
momentum=0.9,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn3'
)
#apply relu
conv3_bn_relu = tf.nn.relu(bn3, name='relu3')
# dropout at 10%
conv3_bn_relu = tf.layers.dropout(conv3_bn_relu, rate=0.1, seed=0, training=training)
with tf.name_scope('conv4') as scope:
# Convolutional layer 4
conv4= tf.layers.conv2d(
conv3_bn_relu, # Input
filters=96, # 96 filters
kernel_size=(4, 4), # Kernel size: 4x4
strides=(1, 1), # Stride: 1
padding='SAME', # "same" padding
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=1),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamC),
name='conv4'
)
bn4 = tf.layers.batch_normalization(
conv4,
axis=-1,
momentum=0.9,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn4'
)
#apply relu
conv4_bn_relu = tf.nn.relu(bn4, name='relu4')
# Max pooling layer 2
pool2 = tf.layers.max_pooling2d(
conv4_bn_relu, # input
pool_size=(2, 2), # pool size 2x2
strides=(2, 2), # stride 2
padding='SAME',
name='pool2'
)
with tf.name_scope('conv5') as scope:
# Convolutional layer 5
conv5= tf.layers.conv2d(
pool2, # Input
filters=128, # 128 filters
kernel_size=(3, 3), # Kernel size: 3x3
strides=(1, 1), # Stride: 1
padding='SAME', # "same" padding
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=2),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamC),
name='conv5'
)
bn5 = tf.layers.batch_normalization(
conv5,
axis=-1,
momentum=0.9,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn5'
)
# activation
conv5_bn_relu = tf.nn.relu(bn5, name='relu5')
# try dropout here
conv5_bn_relu = tf.layers.dropout(conv5_bn_relu, rate=0.1, seed=3, training=training)
with tf.name_scope('conv6') as scope:
# Convolutional layer 6
conv6= tf.layers.conv2d(
conv5_bn_relu, # Input
filters=128, # 128 filters
kernel_size=(3, 3), # Kernel size: 3x3
strides=(1, 1), # Stride: 1
padding='SAME', # "same" padding
activation=None, # None
kernel_initializer=tf.truncated_normal_initializer(stddev=5e-2, seed=3),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamC),
name='conv6'
)
bn6 = tf.layers.batch_normalization(
conv6,
axis=-1,
momentum=0.9,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn6'
)
#apply relu
conv6_bn_relu = tf.nn.relu(bn6, name='relu6')
# Max pooling layer 3
pool3 = tf.layers.max_pooling2d(
conv6_bn_relu, # input
pool_size=(2, 2), # pool size 2x2
strides=(2, 2), # stride 2
padding='SAME',
name='pool3'
)
with tf.name_scope('flatten') as scope:
# Flatten output
flat_output = tf.contrib.layers.flatten(pool3)
# dropout at 10%
flat_output = tf.layers.dropout(flat_output, rate=0.1, seed=5, training=training)
# Fully connected layer 1
with tf.name_scope('fc1') as scope:
fc1 = tf.layers.dense(
flat_output, # input
1024, # 1024 hidden units
activation=None, # None
kernel_initializer=tf.variance_scaling_initializer(scale=2, seed=4),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamF),
name="fc1"
)
bn7 = tf.layers.batch_normalization(
fc1,
axis=-1,
momentum=0.9,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn7'
)
fc1_relu = tf.nn.relu(bn7, name='fc1_relu')
# dropout at 25%
fc1_do = tf.layers.dropout(fc1_relu, rate=0.25, seed=10, training=training)
# Fully connected layer 2
with tf.name_scope('fc2') as scope:
fc2 = tf.layers.dense(
fc1_do, # input
512, # 512 hidden units
activation=None, # None
kernel_initializer=tf.variance_scaling_initializer(scale=2, seed=5),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=lamF),
name="fc2"
)
bn8 = tf.layers.batch_normalization(
fc2,
axis=-1,
momentum=0.9,
epsilon=epsilon,
center=True,
scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
moving_mean_initializer=tf.zeros_initializer(),
moving_variance_initializer=tf.ones_initializer(),
training=training,
name='bn8'
)
fc2_relu = tf.nn.relu(bn8, name='fc2_relu')
# dropout at 25%
fc2_do = tf.layers.dropout(fc2_relu, rate=0.25, seed=11, training=training)
# Output layer
logits = tf.layers.dense(
fc2_do, # input
num_classes, # One output unit per category
activation=None, # No activation function
kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=True,
seed=6,dtype=tf.dtypes.float32),
)
# Kernel weights of the 1st conv. layer
with tf.variable_scope('conv1', reuse=True):
conv_kernels1 = tf.get_variable('kernel')
# Mean cross-entropy
mean_ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
loss = mean_ce + tf.losses.get_regularization_loss()
# Adam optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Minimize cross-entropy
train_op = optimizer.minimize(loss, global_step=global_step)
# Compute predictions and accuracy
predictions = tf.argmax(logits, axis=1, output_type=tf.int32)
is_correct = tf.equal(y, predictions)
accuracy = tf.reduce_mean(tf.cast(is_correct, dtype=tf.float32))
# add this so that the batch norm gets run
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# Create summary hooks
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('cross_entropy', mean_ce)
tf.summary.scalar('learning_rate', learning_rate)
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
merged = tf.summary.merge_all()
###Output
_____no_output_____
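###Markdown
The staircase exponential decay above multiplies the learning rate by 0.9 every `steps_per_epoch * 10` steps; a minimal NumPy sketch of the schedule:
###Code
import numpy as np

def decayed_lr(step, start=0.003, decay=0.9, steps_per_epoch=50000/128, epochs_per_decay=10):
    # staircase=True floors the exponent, so the rate drops in discrete jumps
    return start * decay ** np.floor(step / (steps_per_epoch * epochs_per_decay))

print(decayed_lr(3905), decayed_lr(3910), decayed_lr(7820))  # 0.003 0.0027 0.00243
###Output
_____no_output_____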
###Markdown
Configuring training options
###Code
init = True # whether to initialize the model or use a saved version
crop = False # do random cropping of images?
meta_data_every = 5
log_to_tensorboard = False
print_every = 1 # how often to print metrics
checkpoint_every = 1 # how often to save model in epochs
use_gpu = True # whether or not to use the GPU
print_metrics = True # if True print metrics every epoch; if False draw a plot that is updated every epoch instead
# Placeholders for metrics
if init:
valid_acc_values = []
valid_cost_values = []
train_acc_values = []
train_cost_values = []
train_lr_values = []
train_loss_values = []
# honour the use_gpu flag declared above: hide the GPU from TensorFlow when it is False
config = tf.ConfigProto() if use_gpu else tf.ConfigProto(device_count={'GPU': 0})
###Output
_____no_output_____
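###Markdown
An optional tweak (an assumption, not required by the rest of the notebook) is to let TensorFlow allocate GPU memory on demand instead of reserving it all up front:
###Code
# Grow GPU memory allocation as needed rather than pre-allocating everything
config.gpu_options.allow_growth = True
###Output
_____no_output_____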
###Markdown
Training the model
###Code
import matplotlib.pyplot as plt  # in case these were not imported earlier in the notebook
from IPython import display

with tf.Session(graph=graph, config=config) as sess:
if log_to_tensorboard:
train_writer = tf.summary.FileWriter('./logs/tr_' + model_name, sess.graph)
test_writer = tf.summary.FileWriter('./logs/te_' + model_name)
if not print_metrics:
# create a plot to be updated as model is trained
f, ax = plt.subplots(1,3,figsize=(20,5))
# create the saver
saver = tf.train.Saver()
# If the model is new initialize variables, else restore the session
if init:
sess.run(tf.global_variables_initializer())
else:
saver.restore(sess, './model/cifar_'+model_name+'.ckpt')
# Set seed
np.random.seed(0)
print("Training", model_name, "...")
# Train several epochs
for epoch in range(epochs):
# Accuracy values (train) after each batch
batch_acc = []
batch_cost = []
batch_loss = []
batch_lr = []
# only log run metadata once per epoch
write_meta_data = False
for X_batch, y_batch in get_batches(X_tr, y_tr, batch_size, crop=crop, distort=True):
if write_meta_data and log_to_tensorboard:
# create the metadata
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
# Run training and evaluate accuracy
_, _, summary, acc_value, cost_value, loss_value, step, lr = sess.run([train_op, extra_update_ops, merged, accuracy, mean_ce, loss, global_step, learning_rate], feed_dict={
X: X_batch,
y: y_batch,
training: True
},
options=run_options,
run_metadata=run_metadata)
# Save accuracy (current batch)
batch_acc.append(acc_value)
batch_cost.append(cost_value)
batch_lr.append(lr)
batch_loss.append(loss_value)
# write the summary
train_writer.add_run_metadata(run_metadata, 'step %d' % step)
train_writer.add_summary(summary, step)
write_meta_data = False
else:
# Run training without meta data
_, _, summary, acc_value, cost_value, loss_value, step, lr = sess.run([train_op, extra_update_ops, merged, accuracy, mean_ce, loss, global_step, learning_rate], feed_dict={
X: X_batch,
y: y_batch,
training: True
})
# Save accuracy (current batch)
batch_acc.append(acc_value)
batch_cost.append(cost_value)
batch_lr.append(lr)
batch_loss.append(loss_value)
# write the summary
if log_to_tensorboard:
train_writer.add_summary(summary, step)
# save checkpoint every nth epoch
if(epoch % checkpoint_every == 0):
print("Saving checkpoint")
# save the model
save_path = saver.save(sess, './model/cifar_'+model_name+'.ckpt')
# Now that model is saved set init to false so we reload it
init = False
# init batch arrays
batch_cv_acc = []
batch_cv_cost = []
batch_cv_loss = []
# Evaluate validation accuracy with batches so as to not crash the GPU
for X_batch, y_batch in get_batches(X_cv, y_cv, batch_size, crop=crop, distort=False):
summary, valid_acc, valid_cost, valid_loss = sess.run([merged, accuracy, mean_ce, loss], feed_dict={
X: X_batch,
y: y_batch,
training: False
})
batch_cv_acc.append(valid_acc)
batch_cv_cost.append(valid_cost)
batch_cv_loss.append(valid_loss)
# Write average of validation data to summary logs
if log_to_tensorboard:
summary = tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=np.mean(batch_cv_acc)),tf.Summary.Value(tag="cross_entropy", simple_value=np.mean(batch_cv_cost)),])
test_writer.add_summary(summary, step)
step += 1
# take the mean of the values to add to the metrics
valid_acc_values.append(np.mean(batch_cv_acc))
valid_cost_values.append(np.mean(batch_cv_cost))
train_acc_values.append(np.mean(batch_acc))
train_cost_values.append(np.mean(batch_cost))
train_lr_values.append(np.mean(batch_lr))
train_loss_values.append(np.mean(batch_loss))
if print_metrics:
# Print progress every nth epoch to keep output to reasonable amount
if(epoch % print_every == 0):
print('Epoch {:02d} - step {} - cv acc: {:.3f} - train acc: {:.3f} (mean) - cv cost: {:.3f} - lr: {:.5f}'.format(
epoch, step, np.mean(batch_cv_acc), np.mean(batch_acc), np.mean(batch_cv_cost), lr
))
else:
# update the plot
ax[0].cla()
ax[0].plot(valid_acc_values, color="red", label="Validation")
ax[0].plot(train_acc_values, color="blue", label="Training")
ax[0].set_title('Validation accuracy: {:.4f} (mean last 3)'.format(np.mean(valid_acc_values[-3:])))
# since we can't zoom in on plots like in tensorboard, scale y axis to give a decent amount of detail
if np.mean(valid_acc_values[-3:]) > 0.85:
ax[0].set_ylim([0.75,1.0])
elif np.mean(valid_acc_values[-3:]) > 0.75:
ax[0].set_ylim([0.65,1.0])
elif np.mean(valid_acc_values[-3:]) > 0.65:
ax[0].set_ylim([0.55,1.0])
elif np.mean(valid_acc_values[-3:]) > 0.55:
ax[0].set_ylim([0.45,1.0])
ax[0].set_xlabel('Epoch')
ax[0].set_ylabel('Accuracy')
ax[0].legend()
ax[1].cla()
ax[1].plot(valid_cost_values, color="red", label="Validation")
ax[1].plot(train_cost_values, color="blue", label="Training")
ax[1].set_title('Validation xentropy: {:.3f} (mean last 3)'.format(np.mean(valid_cost_values[-3:])))
ax[1].set_xlabel('Epoch')
ax[1].set_ylabel('Cross Entropy')
ax[1].legend()
ax[2].cla()
ax[2].plot(train_lr_values)
ax[2].set_title("Learning rate: {:.6f}".format(np.mean(train_lr_values[-1:])))
ax[2].set_xlabel("Epoch")
ax[2].set_ylabel("Learning Rate")
display.display(plt.gcf())
display.clear_output(wait=True)
# Print data every 50th epoch so I can write it down to compare models
if (not print_metrics) and (epoch % 50 == 0) and (epoch > 1):
if(epoch % print_every == 0):
print('Epoch {:02d} - step {} - cv acc: {:.3f} - train acc: {:.3f} (mean) - cv cost: {:.3f} - lr: {:.5f}'.format(
epoch, step, np.mean(batch_cv_acc), np.mean(batch_acc), np.mean(batch_cv_cost), lr
))
# print results of last epoch
print('Epoch {} - cv acc: {:.4f} - train acc: {:.4f} (mean) - cv cost: {:.3f}'.format(
epochs, np.mean(batch_cv_acc), np.mean(batch_acc), np.mean(batch_cv_cost)
))
# save the session
save_path = saver.save(sess, './model/cifar_'+model_name+'.ckpt')
# init the test data array
test_acc_values = []
# Check on the test data
for X_batch, y_batch in get_batches(X_te, y_te, batch_size, crop=crop, distort=False):
test_accuracy = sess.run(accuracy, feed_dict={
X: X_batch,
y: y_batch,
training: False
})
test_acc_values.append(test_accuracy)
# average test accuracy across batches
test_acc = np.mean(test_acc_values)
# show the plot
plt.show()
###Output
Training model_7.13.4.7.7l ...
Saving checkpoint
Epoch 00 - step 391 - cv acc: 0.514 - train acc: 0.480 (mean) - cv cost: 1.403 - lr: 0.00300
Saving checkpoint
Epoch 01 - step 782 - cv acc: 0.680 - train acc: 0.648 (mean) - cv cost: 0.928 - lr: 0.00300
Saving checkpoint
Epoch 02 - step 1173 - cv acc: 0.710 - train acc: 0.710 (mean) - cv cost: 0.902 - lr: 0.00300
Saving checkpoint
Epoch 03 - step 1564 - cv acc: 0.747 - train acc: 0.743 (mean) - cv cost: 0.733 - lr: 0.00300
Saving checkpoint
Epoch 04 - step 1955 - cv acc: 0.753 - train acc: 0.764 (mean) - cv cost: 0.740 - lr: 0.00300
Saving checkpoint
Epoch 05 - step 2346 - cv acc: 0.738 - train acc: 0.784 (mean) - cv cost: 0.802 - lr: 0.00300
Saving checkpoint
Epoch 06 - step 2737 - cv acc: 0.763 - train acc: 0.797 (mean) - cv cost: 0.731 - lr: 0.00300
Saving checkpoint
Epoch 07 - step 3128 - cv acc: 0.774 - train acc: 0.812 (mean) - cv cost: 0.689 - lr: 0.00300
Saving checkpoint
Epoch 08 - step 3519 - cv acc: 0.789 - train acc: 0.819 (mean) - cv cost: 0.624 - lr: 0.00300
Saving checkpoint
Epoch 09 - step 3910 - cv acc: 0.819 - train acc: 0.827 (mean) - cv cost: 0.553 - lr: 0.00270
Saving checkpoint
Epoch 10 - step 4301 - cv acc: 0.818 - train acc: 0.841 (mean) - cv cost: 0.564 - lr: 0.00270
Saving checkpoint
Epoch 11 - step 4692 - cv acc: 0.833 - train acc: 0.848 (mean) - cv cost: 0.510 - lr: 0.00270
Saving checkpoint
Epoch 12 - step 5083 - cv acc: 0.838 - train acc: 0.854 (mean) - cv cost: 0.503 - lr: 0.00270
Saving checkpoint
Epoch 13 - step 5474 - cv acc: 0.824 - train acc: 0.858 (mean) - cv cost: 0.555 - lr: 0.00270
Saving checkpoint
Epoch 14 - step 5865 - cv acc: 0.820 - train acc: 0.861 (mean) - cv cost: 0.532 - lr: 0.00270
Saving checkpoint
Epoch 15 - step 6256 - cv acc: 0.850 - train acc: 0.866 (mean) - cv cost: 0.455 - lr: 0.00270
Saving checkpoint
Epoch 16 - step 6647 - cv acc: 0.848 - train acc: 0.870 (mean) - cv cost: 0.476 - lr: 0.00270
Saving checkpoint
Epoch 17 - step 7038 - cv acc: 0.823 - train acc: 0.871 (mean) - cv cost: 0.551 - lr: 0.00270
Saving checkpoint
Epoch 18 - step 7429 - cv acc: 0.849 - train acc: 0.875 (mean) - cv cost: 0.458 - lr: 0.00270
Saving checkpoint
Epoch 19 - step 7820 - cv acc: 0.851 - train acc: 0.877 (mean) - cv cost: 0.501 - lr: 0.00243
Epoch 20 - cv acc: 0.8510 - train acc: 0.8768 (mean) - cv cost: 0.501
###Markdown
Scoring and evaluating the trained model
###Code
print("Model : ", model_name)
print("Convolutional network accuracy (test set):", test_acc, " Validation Set", valid_acc_values[-1])
###Output
Model : model_7.13.4.7.7l
Convolutional network accuracy (test set): 0.84121096 Validation Set 0.8509766
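###Markdown
Since a checkpoint is saved every epoch, the trained weights can be restored later for inference without retraining; a minimal sketch reusing the same graph:
###Code
with tf.Session(graph=graph) as sess:
    saver = tf.train.Saver()
    saver.restore(sess, './model/cifar_' + model_name + '.ckpt')
    # training=False disables dropout and uses the batch-norm moving averages
    preds = sess.run(predictions, feed_dict={X: X_te[:10], training: False})
    print(preds)  # predicted class indices for the first 10 test images
###Output
_____no_output_____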