path (stringlengths 7-265) | concatenated_notebook (stringlengths 46-17M)
---|---|
_notebooks/2022-05-19-emotet_x64_emulation.ipynb | ###Markdown
Emotet x64 Stack Strings Config Emulation> Taking a look at the new Emotet stack strings config - toc: true - badges: true- categories: [emotet,emulation,config,dumpulator,malware] OverviewThe week of May 9th, 2022 Emotet released an update to their x64 malware that used "stack strings" and an obfuscator to protect the strings, keys, and c2s. This was a change from the encrypted strings and c2 tables that were usually stored at the beginning of the `.text` and `.data` sections. Our new approach for config extraction will be to identify the functions used to supply the strings and c2s and emulate them. Samples- [packed](https://malshare.com/sample.php?action=detail&hash=92033dc85730f7dc5dbd85369ea1db8806ce7581c1e9b4764a82abfc54e3146e): `92033dc85730f7dc5dbd85369ea1db8806ce7581c1e9b4764a82abfc54e3146e`- [unpacked](https://malshare.com/sample.php?action=detail&hash=c688e079a16b3345c83a285ac2ae8dd48680298085421c225680f26ceae73eb7): `c688e079a16b3345c83a285ac2ae8dd48680298085421c225680f26ceae73eb7` Tools- Dumpulator minidump emulation [github](https://github.com/mrexodia/dumpulator) Using Dumpulator Emulation
###Code
from dumpulator import Dumpulator
dp = Dumpulator("/tmp/emo2.dmp", quiet=True)
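# addresses of the string-builder functions identified in the dump (module base 0x7FFA3BA20000)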
fn_addr_list = [0x07FFA3BA235D0, 0x7FFA3BA213C4,0x7FFA3BA21AAC,0x7FFA3BA2400C,0x7FFA3BA282D8,0x7FFA3BA2A36C,0x7FFA3BA2D370,0x7FFA3BA2DD3C,0x7FFA3BA2E468,0x7FFA3BA30C28,0x7FFA3BA31960,0x7FFA3BA33F28,0x7FFA3BA35980,0x7FFA3BA35B04,0x7FFA3BA3AFB0,0x7FFA3BA3F9A8,0x7FFA3BA3FEE8,0x7FFA3BA4012C,0x7FFA3BA41124,0x7FFA3BA412A4,0x7FFA3BA415A0,0x7FFA3BA42224,0x7FFA3BA43224,0x7FFA3BA44AEC,0x7FFA3BA465F0,0x7FFA3BA46744,0x7FFA3BA47140,0x7FFA3BA472A8,0x7FFA3BA490F8,0x7FFA3BA49850,0x7FFA3BA49A58,0x7FFA3BA49D04,0x7FFA3BA49FB4,0x7FFA3BA4BCB4,0x7FFA3BA4C168]
for fn_addr in fn_addr_list:
out = dp.call(fn_addr, [])
ptxt_str = dp.read_str(out, encoding='utf-16')
print(f"{hex(fn_addr)}: {ptxt_str}")
dp.read_str(out, encoding='utf-16')
import struct
key_decrypt_functions = [0x7FFA3BA33B90, 0x7FFA3BA22048]
for key_decrypt_function in key_decrypt_functions:
tmp_arg = dp.allocate(8)
out = dp.call(key_decrypt_function, [tmp_arg,tmp_arg, tmp_arg, tmp_arg])
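# Assumed key blob layout, matching the parsing below: a 4-byte magic,
# a 4-byte little-endian key length, then 2 * key_len bytes of key material
# (e.g. the X and Y coordinates of an ECC public key, key_len bytes each)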
key_header = bytes(dp.read(out, 8))
key_len = struct.unpack('<I',key_header[4:8])[0]
full_key_len = 8 + 2 * key_len
key = bytes(dp.read(out, full_key_len))
print(key)
c2_fns = [0x07FFA3BA2E70C, 0x7FFA3BA30D88,0x7FFA3BA4B054,0x7FFA3BA21528,0x7FFA3BA4A4CC,0x7FFA3BA4C2B8,0x7FFA3BA4BF80,0x7FFA3BA4AA74,0x7FFA3BA2DAB0,0x7FFA3BA43584,0x7FFA3BA34644,0x7FFA3BA2FD58,0x7FFA3BA35690,0x7FFA3BA3975C,0x7FFA3BA23BD0,0x7FFA3BA3519C,0x7FFA3BA2B610,0x7FFA3BA4C8B0,0x7FFA3BA3C9F8,0x7FFA3BA36A10,0x7FFA3BA4339C,0x7FFA3BA21F58,0x7FFA3BA4557C,0x7FFA3BA28BC8,0x7FFA3BA3C5B4,0x7FFA3BA45498,0x7FFA3BA21000,0x7FFA3BA24E50,0x7FFA3BA2FBC4,0x7FFA3BA33278,0x7FFA3BA468C0,0x7FFA3BA464FC,0x7FFA3BA28EE0,0x7FFA3BA274A0,0x7FFA3BA3092C,0x7FFA3BA24D58,0x7FFA3BA3E274,0x7FFA3BA2BCF8,0x7FFA3BA4CAF8,0x7FFA3BA4A340,0x7FFA3BA29820,0x7FFA3BA4A0F8,0x7FFA3BA494D8,0x7FFA3BA35C7C,0x7FFA3BA3D5C8,0x7FFA3BA21D48,0x7FFA3BA4103C,0x7FFA3BA28DCC,0x7FFA3BA22F64,0x7FFA3BA301BC,0x7FFA3BA2F454,0x7FFA3BA2B9E4,0x7FFA3BA24C38,0x7FFA3BA3CF80,0x7FFA3BA3E360,0x7FFA3BA45264,0x7FFA3BA49C14,0x7FFA3BA469D0,0x7FFA3BA281E4,0x7FFA3BA2DC28,0x7FFA3BA26F38,0x7FFA3BA45678,0x7FFA3BA24868,0x7FFA3BA35598]
def get_c2_from_fn(c2_fn):
c2_ip = dp.allocate(4)
c2_port = dp.allocate(4)
ret = dp.call(c2_fn, [c2_ip, c2_port])
c2_port_bytes = dp.read(c2_port, 4)
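# the port lives in the high-order word of the 4-byte buffer, hence bytes [2:4]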
c2_port = struct.unpack('<H',c2_port_bytes[2:4])[0]
c2_ip_bytes = dp.read(c2_ip, 4)
c2_ip = f"{c2_ip_bytes[0]}.{c2_ip_bytes[1]}.{c2_ip_bytes[2]}.{c2_ip_bytes[3]}"
return f"{c2_ip}:{c2_port}"
for c2_fn in c2_fns:
c2 = get_c2_from_fn(c2_fn)
print(f"{c2}")
###Output
172.104.251.154:8080
209.250.246.206:443
110.232.117.186:8080
164.68.99.3:8080
119.193.124.41:7080
212.237.17.99:8080
107.182.225.142:8080
185.8.212.130:7080
153.126.146.25:7080
77.81.247.144:8080
209.126.98.206:8080
201.94.166.162:443
131.100.24.231:80
45.235.8.30:8080
213.241.20.155:443
103.43.46.182:443
0.0.0.0:0
129.232.188.93:443
103.132.242.26:8080
151.106.112.196:8080
45.118.115.99:8080
185.4.135.165:8080
103.70.28.102:8080
51.91.7.5:8080
27.54.89.58:8080
196.218.30.83:443
206.189.28.199:8080
91.207.28.33:8080
79.137.35.198:8080
51.254.140.238:7080
173.212.193.249:8080
203.114.109.124:443
94.23.45.86:4143
63.142.250.212:443
189.126.111.200:7080
160.16.142.56:8080
102.222.215.74:443
5.9.116.246:8080
158.69.222.101:443
167.172.253.162:8080
82.165.152.127:8080
212.24.98.99:8080
197.242.150.244:8080
72.15.201.15:8080
101.50.0.91:8080
51.91.76.89:8080
183.111.227.137:8080
188.44.20.25:443
58.227.42.236:80
45.176.232.124:443
185.157.82.211:8080
163.44.196.120:8080
159.65.88.10:8080
146.59.226.45:443
1.234.2.232:8080
149.56.131.28:8080
209.97.163.214:443
46.55.222.11:443
150.95.66.124:8080
103.75.201.2:443
216.158.226.206:443
134.122.66.193:8080
1.234.21.73:7080
167.99.115.35:8080
###Markdown
Automated Function IdentificationNow that we have a way to extract the data from the functions using emulation, all we need to do is create some regexes to identify the functions and complete our automation.
###Code
import re
import struct
import pefile
dump_image_base = 0x7FFA3BA20000
FILE_PATH = '/tmp/emo_unpacked_1020000.bin'
file_data = open(FILE_PATH, 'rb').read()
pe = pefile.PE(data=file_data)
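# Assumed instruction pair behind the egg below:
#   48 8D 05 <disp32>        lea rax, [rip + disp32]  ; load a function pointer
#   48 89 81 <disp32> 00 00  mov [rcx + disp32], rax  ; store it in a struct
# The RIP-relative displacement is measured from the end of the 7-byte lea,
# which is why 7 is added when computing the function RVA below.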
egg = rb'\x48\x8D\x05(....)\x48\x89\x81..\x00\x00'
for m in re.finditer(egg, file_data):
fn_rel_offset = struct.unpack('<i', m.group(1))[0]
inst_offset = m.start()
fn_rva = pe.get_rva_from_offset(inst_offset) + 7 + fn_rel_offset
fn_addr = dump_image_base + fn_rva
c2 = get_c2_from_fn(fn_addr)
print(f"{hex(fn_addr)}: {c2}")
###Output
0x7ffa3ba30d88: 209.250.246.206:443
0x7ffa3ba4b054: 110.232.117.186:8080
0x7ffa3ba21528: 164.68.99.3:8080
0x7ffa3ba4a4cc: 119.193.124.41:7080
0x7ffa3ba4c2b8: 212.237.17.99:8080
0x7ffa3ba4aa74: 185.8.212.130:7080
0x7ffa3ba2dab0: 153.126.146.25:7080
0x7ffa3ba43584: 77.81.247.144:8080
0x7ffa3ba34644: 209.126.98.206:8080
0x7ffa3ba2fd58: 201.94.166.162:443
0x7ffa3ba35690: 131.100.24.231:80
0x7ffa3ba3975c: 45.235.8.30:8080
0x7ffa3ba23bd0: 213.241.20.155:443
0x7ffa3ba3519c: 103.43.46.182:443
0x7ffa3ba2b610: 0.0.0.0:0
0x7ffa3ba4c8b0: 129.232.188.93:443
0x7ffa3ba3c9f8: 103.132.242.26:8080
0x7ffa3ba36a10: 151.106.112.196:8080
0x7ffa3ba4339c: 45.118.115.99:8080
0x7ffa3ba21f58: 185.4.135.165:8080
0x7ffa3ba4557c: 103.70.28.102:8080
0x7ffa3ba28bc8: 51.91.7.5:8080
0x7ffa3ba3c5b4: 27.54.89.58:8080
0x7ffa3ba21000: 206.189.28.199:8080
0x7ffa3ba2fbc4: 79.137.35.198:8080
0x7ffa3ba33278: 51.254.140.238:7080
0x7ffa3ba468c0: 173.212.193.249:8080
0x7ffa3ba464fc: 203.114.109.124:443
0x7ffa3ba28ee0: 94.23.45.86:4143
0x7ffa3ba3092c: 189.126.111.200:7080
0x7ffa3ba24d58: 160.16.142.56:8080
0x7ffa3ba3e274: 102.222.215.74:443
0x7ffa3ba2bcf8: 5.9.116.246:8080
0x7ffa3ba4a340: 167.172.253.162:8080
0x7ffa3ba29820: 82.165.152.127:8080
0x7ffa3ba494d8: 197.242.150.244:8080
0x7ffa3ba35c7c: 72.15.201.15:8080
0x7ffa3ba3d5c8: 101.50.0.91:8080
0x7ffa3ba21d48: 51.91.76.89:8080
0x7ffa3ba4103c: 183.111.227.137:8080
0x7ffa3ba28dcc: 188.44.20.25:443
0x7ffa3ba22f64: 58.227.42.236:80
0x7ffa3ba301bc: 45.176.232.124:443
0x7ffa3ba2b9e4: 163.44.196.120:8080
0x7ffa3ba24c38: 159.65.88.10:8080
0x7ffa3ba3cf80: 146.59.226.45:443
0x7ffa3ba3e360: 1.234.2.232:8080
0x7ffa3ba49c14: 209.97.163.214:443
0x7ffa3ba469d0: 46.55.222.11:443
0x7ffa3ba2dc28: 103.75.201.2:443
0x7ffa3ba26f38: 216.158.226.206:443
0x7ffa3ba45678: 134.122.66.193:8080
0x7ffa3ba24868: 1.234.21.73:7080
0x7ffa3ba35598: 167.99.115.35:8080
|
4. Essential DataFrame Methods.ipynb | ###Markdown
DataFrame Essential Methods Objectives* Select columns based on data type* Arithmetic operators* Summary statistic methods Arithmetic Operators on DataFramesMost of the arithmetic operators execute correctly when the columns in the DataFrame are all numeric. Let's read in the movie dataset and select only the numeric columns.
###Code
import pandas as pd
movie = pd.read_csv('data/movie.csv', index_col='movie_title')
movie.head()
###Output
_____no_output_____
###Markdown
Use `select_dtypes`
###Code
movie_number = movie.select_dtypes(include='number')
movie_number.head()
movie_str = movie.select_dtypes(include='object')
movie_str.head()
###Output
_____no_output_____
###Markdown
Arithmetic operatorsAll the arithmetic operators will work now that we have an all numeric DataFrame
###Code
(movie_number + 10).head()
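# other operators broadcast the same way on an all-numeric DataFrame
(movie_number * 2).head()
(movie_number - movie_number.mean()).head()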
###Output
_____no_output_____
###Markdown
Statistical Summary methods Basic* `min, max, median, mean, mode, sum, var, std, corr, cov` Accumulation* `cumsum, cummin, cummax`These methods can be applied vertically or horizontally. For example, we can find the `min` by row or by column Use `axis='index'` or `axis='columns'`Nearly all DataFrame methods have an **`axis`** parameter, which determines the direction of the operation. The **`axis`** parameter can take on two values ('index' or 'columns')* **`axis='index'`** - The operation will happen down the columns.* **`axis='columns'`** - The operation will happen across the rows![][1][1]: images/axis.pngYou can use 0 instead of `index` and 1 instead of `columns` but I prefer to use the words as it is more explicit
###Code
# sum each column
movie_number.sum(axis='index') # this is the default so we don't actually need to specify here
# sum across each row
movie_number.sum(axis='columns')
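# the accumulation methods accept the same axis parameter; cumsum runs down each column by default
movie_number.cumsum().head()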
###Output
_____no_output_____
###Markdown
Your Turn Problem 1Read in the college dataset and put the column **`INSTNM`** in the index. Also read in the college data dictionary and output it. We will use this dataset for the rest of the problems
###Code
# your code here
college = pd.read_csv('data/college.csv', index_col='INSTNM')
college.head()
###Output
_____no_output_____
###Markdown
Problem 2Select the 2 SAT columns and assign them to their own variable called **`sat`**. Find the max SAT score per college and the max math and verbal scores.
###Code
# your code here
SAT = college[['SATVRMID', 'SATMTMID']]
SAT.max(axis=0)
###Output
_____no_output_____
###Markdown
Problem 3There are a lot of missing SAT scores. Use the `isna` method to turn all the values to True/False and find the percentage of schools that have missing scores.
###Code
SAT.isna().mean()
###Output
_____no_output_____
###Markdown
Solutions Problem 1Read in the college dataset and put the column **`INSTNM`** in the index. Also read in the college data dictionary and output it. We will use this dataset for the rest of the problems
###Code
college = pd.read_csv('data/college.csv', index_col='INSTNM')
college.head()
college_dd = pd.read_csv('data/college_data_dictionary.csv')
college_dd
###Output
_____no_output_____
###Markdown
Problem 2Select the 2 SAT columns and put them into their own variable. Find the max SAT score per college and the max math and verbal scores.
###Code
sat = college[['SATMTMID', 'SATVRMID']]
sat.head()
sat.max()
sat.max(axis='columns').head(10)
###Output
_____no_output_____
###Markdown
Problem 3There are a lot of missing SAT scores. Use the `isna` method to turn all the values to True/False and find the percentage of schools that have missing scores.
###Code
sat.isna().head()
sat.isna().mean()
###Output
_____no_output_____ |
Machine Learning/Template-Algoritmos-Machine-Learning.ipynb | ###Markdown
Data Science Academy Big Data Real-Time Analytics com Python e Spark Chapter 6 Template for Building Machine Learning Models This notebook contains a template of the code needed to create the main Machine Learning algorithms.
###Code
from IPython.display import Image
Image(url = 'images/processo.png')
###Output
_____no_output_____
###Markdown
Linear Regression
###Code
# Import the module
from sklearn import linear_model
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the linear regression object
linear = linear_model.LinearRegression()
# Train the model on the training data and check the score
linear.fit(x_treino, y_treino)
linear.score(x_treino, y_treino)
# Collect the coefficients
print('Coefficient: \n', linear.coef_)
print('Intercept: \n', linear.intercept_)
# Predictions
valores_previstos = linear.predict(x_teste)
###Output
_____no_output_____
###Markdown
Logistic Regression
###Code
# Import the module
from sklearn.linear_model import LogisticRegression
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the logistic regression object
modelo = LogisticRegression()
# Train the model on the training data and check the score
modelo.fit(x_treino, y_treino)
modelo.score(x_treino, y_treino)
# Collect the coefficients
print('Coefficient: \n', modelo.coef_)
print('Intercept: \n', modelo.intercept_)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____
###Markdown
Decision Trees
###Code
# Import the module
from sklearn import tree
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the tree object for regression
modelo = tree.DecisionTreeRegressor()
# Create the tree object for classification
modelo = tree.DecisionTreeClassifier()
# Train the model on the training data and check the score
modelo.fit(x_treino, y_treino)
modelo.score(x_treino, y_treino)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____
###Markdown
Naive Bayes
###Code
# Import the module
from sklearn.naive_bayes import GaussianNB
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the GaussianNB object
modelo = GaussianNB()
# Train the model on the training data
modelo.fit(x_treino, y_treino)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____
###Markdown
Support Vector Machines
###Code
# Import the module
from sklearn import svm
# Create the SVM classification object
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the SVM classification object
modelo = svm.SVC()
# Train the model on the training data and check the score
modelo.fit(x_treino, y_treino)
modelo.score(x_treino, y_treino)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____
###Markdown
K-Nearest Neighbors
###Code
# Import the module
from sklearn.neighbors import KNeighborsClassifier
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the KNeighbors classification object
modelo = KNeighborsClassifier(n_neighbors = 6) # Default value is 5
# Train the model on the training data
modelo.fit(x_treino, y_treino)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____
###Markdown
K-Means
###Code
# Import the module
from sklearn.cluster import KMeans
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the KMeans object
modelo = KMeans(n_clusters = 3, random_state = 0)
# Train the model on the training data
modelo.fit(x_treino)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____
###Markdown
Random Forest
###Code
# Import the module
from sklearn.ensemble import RandomForestClassifier
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the Random Forest object
modelo = RandomForestClassifier()
# Train the model on the training data
modelo.fit(x_treino, y_treino)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____
###Markdown
Dimensionality Reduction
###Code
# Import the module
from sklearn import decomposition
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the PCA object (k = number of components to keep)
pca = decomposition.PCA(n_components = k)
# For factor analysis
fa = decomposition.FactorAnalysis()
# Reduce the dimensionality of the training dataset using PCA
treino_reduzido = pca.fit_transform(x_treino)
# Reduce the dimensionality of the test dataset
teste_reduzido = pca.transform(x_teste)
###Output
_____no_output_____
###Markdown
Gradient Boosting & AdaBoost
###Code
# Import the module
from sklearn.ensemble import GradientBoostingClassifier
# Training and test datasets
x_treino = dataset_treino_variaveis_preditoras
y_treino = dataset_treino_variavel_prevista
x_teste = dataset_teste_variaveis_preditoras
# Create the Gradient Boosting object
modelo = GradientBoostingClassifier(n_estimators = 100, learning_rate = 1.0, max_depth = 1, random_state = 0)
# Train the model on the training data
modelo.fit(x_treino, y_treino)
# Predictions
valores_previstos = modelo.predict(x_teste)
###Output
_____no_output_____ |
01.getting-started/11.production-deploy-to-aks/11.production-deploy-to-aks.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Deploying a web service to Azure Kubernetes Service (AKS)This notebook shows the steps for deploying a service: registering a model, creating an image, provisioning a cluster (one time action), and deploying a service to it. We then test and delete the service, image and model.
###Code
from azureml.core import Workspace
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.webservice import Webservice, AksWebservice
from azureml.core.image import Image
from azureml.core.model import Model
import azureml.core
print(azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Get workspaceLoad existing workspace from the config file info.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register the modelRegister an existing trained model, add description and tags.
###Code
#Register the model
from azureml.core.model import Model
model = Model.register(model_path = "sklearn_regression_model.pkl", # this points to a local file
model_name = "sklearn_regression_model.pkl", # this is the name the model is registered as
tags = {'area': "diabetes", 'type': "regression"},
description = "Ridge regression model to predict diabetes",
workspace = ws)
print(model.name, model.description, model.version)
###Output
_____no_output_____
###Markdown
Create an imageCreate an image using the registered model and the script that will load and run the model.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
try:
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
except Exception as e:
result = str(e)
return json.dumps({"result": result.tolist()})
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
from azureml.core.image import ContainerImage
image_config = ContainerImage.image_configuration(execution_script = "score.py",
runtime = "python",
conda_file = "myenv.yml",
description = "Image with ridge regression model",
tags = {'area': "diabetes", 'type': "regression"}
)
image = ContainerImage.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
Provision the AKS ClusterThis is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.
###Code
# Use the default configuration (can also provide parameters to customize)
prov_config = AksCompute.provisioning_configuration()
aks_name = 'my-aks-9'
# Create the cluster
aks_target = ComputeTarget.create(workspace = ws,
name = aks_name,
provisioning_configuration = prov_config)
%%time
aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
###Output
_____no_output_____
###Markdown
Optional step: Attach existing AKS clusterIf you have existing AKS cluster in your Azure subscription, you can attach it to the Workspace.
###Code
'''
# Use the default configuration (can also provide parameters to customize)
resource_id = '/subscriptions/92c76a2f-0e1c-4216-b65e-abf7a3f34c1e/resourcegroups/raymondsdk0604/providers/Microsoft.ContainerService/managedClusters/my-aks-0605d37425356b7d01'
create_name='my-existing-aks'
# Create the cluster
aks_target = AksCompute.attach(workspace=ws, name=create_name, resource_id=resource_id)
# Wait for the operation to complete
aks_target.wait_for_completion(True)
'''
###Output
_____no_output_____
###Markdown
Deploy web service to AKS
###Code
#Set the web service configuration (using default here)
aks_config = AksWebservice.deploy_configuration()
%%time
aks_service_name ='aks-service-1'
aks_service = Webservice.deploy_from_image(workspace = ws,
name = aks_service_name,
image = image,
deployment_config = aks_config,
deployment_target = aks_target)
aks_service.wait_for_deployment(show_output = True)
print(aks_service.state)
###Output
_____no_output_____
###Markdown
Test the web serviceWe test the web service by passing data.
###Code
%%time
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aks_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Clean upDelete the service, image and model.
###Code
%%time
aks_service.delete()
image.delete()
model.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Deploying a web service to Azure Kubernetes Service (AKS)This notebook shows the steps for deploying a service: registering a model, creating an image, provisioning a cluster (one time action), and deploying a service to it. We then test and delete the service, image and model.
###Code
from azureml.core import Workspace
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.webservice import Webservice, AksWebservice
from azureml.core.image import Image
from azureml.core.model import Model
import azureml.core
print(azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Get workspaceLoad existing workspace from the config file info.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register the modelRegister an existing trained model, add description and tags.
###Code
#Register the model
from azureml.core.model import Model
model = Model.register(model_path = "sklearn_regression_model.pkl", # this points to a local file
model_name = "sklearn_regression_model.pkl", # this is the name the model is registered as
tags = {'area': "diabetes", 'type': "regression"},
description = "Ridge regression model to predict diabetes",
workspace = ws)
print(model.name, model.description, model.version)
###Output
_____no_output_____
###Markdown
Create an imageCreate an image using the registered model and the script that will load and run the model.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
try:
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
except Exception as e:
result = str(e)
return json.dumps({"result": result.tolist()})
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
myenv.add_pip_package("pynacl==1.2.1")
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
from azureml.core.image import ContainerImage
image_config = ContainerImage.image_configuration(execution_script = "score.py",
runtime = "python",
conda_file = "myenv.yml",
description = "Image with ridge regression model",
tags = {'area': "diabetes", 'type': "regression"}
)
image = ContainerImage.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
Provision the AKS ClusterThis is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.
###Code
# Use the default configuration (can also provide parameters to customize)
prov_config = AksCompute.provisioning_configuration()
aks_name = 'my-aks-9'
# Create the cluster
aks_target = ComputeTarget.create(workspace = ws,
name = aks_name,
provisioning_configuration = prov_config)
%%time
aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
###Output
_____no_output_____
###Markdown
Optional step: Attach existing AKS clusterIf you have existing AKS cluster in your Azure subscription, you can attach it to the Workspace.
###Code
'''
# Use the default configuration (can also provide parameters to customize)
resource_id = '/subscriptions/92c76a2f-0e1c-4216-b65e-abf7a3f34c1e/resourcegroups/raymondsdk0604/providers/Microsoft.ContainerService/managedClusters/my-aks-0605d37425356b7d01'
create_name='my-existing-aks'
# Create the cluster
aks_target = AksCompute.attach(workspace=ws, name=create_name, resource_id=resource_id)
# Wait for the operation to complete
aks_target.wait_for_completion(True)
'''
###Output
_____no_output_____
###Markdown
Deploy web service to AKS
###Code
#Set the web service configuration (using default here)
aks_config = AksWebservice.deploy_configuration()
%%time
aks_service_name ='aks-service-1'
aks_service = Webservice.deploy_from_image(workspace = ws,
name = aks_service_name,
image = image,
deployment_config = aks_config,
deployment_target = aks_target)
aks_service.wait_for_deployment(show_output = True)
print(aks_service.state)
###Output
_____no_output_____
###Markdown
Test the web serviceWe test the web service by passing data.
###Code
%%time
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aks_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Clean upDelete the service, image and model.
###Code
%%time
aks_service.delete()
image.delete()
model.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Deploying a web service to Azure Kubernetes Service (AKS)This notebook shows the steps for deploying a service: registering a model, creating an image, provisioning a cluster (one time action), and deploying a service to it. We then test and delete the service, image and model.
###Code
from azureml.core import Workspace
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.webservice import Webservice, AksWebservice
from azureml.core.image import Image
from azureml.core.model import Model
import azureml.core
print(azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Get workspaceLoad existing workspace from the config file info.
###Code
from azureml.core.workspace import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
###Output
_____no_output_____
###Markdown
Register the modelRegister an existing trained model, add description and tags.
###Code
#Register the model
from azureml.core.model import Model
model = Model.register(model_path = "sklearn_regression_model.pkl", # this points to a local file
model_name = "sklearn_regression_model.pkl", # this is the name the model is registered as
tags = {'area': "diabetes", 'type': "regression"},
description = "Ridge regression model to predict diabetes",
workspace = ws)
print(model.name, model.description, model.version)
###Output
_____no_output_____
###Markdown
Create an imageCreate an image using the registered model and the script that will load and run the model.
###Code
%%writefile score.py
import pickle
import json
import numpy
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
def init():
global model
# note here "sklearn_regression_model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
model_path = Model.get_model_path('sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
# note you can pass in multiple rows for scoring
def run(raw_data):
try:
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
# you can return any data type as long as it is JSON-serializable
return result.tolist()
except Exception as e:
error = str(e)
return error
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn'])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
from azureml.core.image import ContainerImage
image_config = ContainerImage.image_configuration(execution_script = "score.py",
runtime = "python",
conda_file = "myenv.yml",
description = "Image with ridge regression model",
tags = {'area': "diabetes", 'type': "regression"}
)
image = ContainerImage.create(name = "myimage1",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
###Output
_____no_output_____
###Markdown
Provision the AKS ClusterThis is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it.
###Code
# Use the default configuration (can also provide parameters to customize)
prov_config = AksCompute.provisioning_configuration()
aks_name = 'my-aks-9'
# Create the cluster
aks_target = ComputeTarget.create(workspace = ws,
name = aks_name,
provisioning_configuration = prov_config)
%%time
aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
###Output
_____no_output_____
###Markdown
Optional step: Attach existing AKS clusterIf you have existing AKS cluster in your Azure subscription, you can attach it to the Workspace.
###Code
'''
# Use the default configuration (can also provide parameters to customize)
resource_id = '/subscriptions/92c76a2f-0e1c-4216-b65e-abf7a3f34c1e/resourcegroups/raymondsdk0604/providers/Microsoft.ContainerService/managedClusters/my-aks-0605d37425356b7d01'
create_name='my-existing-aks'
# Create the cluster
aks_target = AksCompute.attach(workspace=ws, name=create_name, resource_id=resource_id)
# Wait for the operation to complete
aks_target.wait_for_completion(True)
'''
###Output
_____no_output_____
###Markdown
Deploy web service to AKS
###Code
#Set the web service configuration (using default here)
aks_config = AksWebservice.deploy_configuration()
%%time
aks_service_name ='aks-service-1'
aks_service = Webservice.deploy_from_image(workspace = ws,
name = aks_service_name,
image = image,
deployment_config = aks_config,
deployment_target = aks_target)
aks_service.wait_for_deployment(show_output = True)
print(aks_service.state)
###Output
_____no_output_____
###Markdown
Test the web serviceWe test the web service by passing data.
###Code
%%time
import json
test_sample = json.dumps({'data': [
[1,2,3,4,5,6,7,8,9,10],
[10,9,8,7,6,5,4,3,2,1]
]})
test_sample = bytes(test_sample,encoding = 'utf8')
prediction = aks_service.run(input_data = test_sample)
print(prediction)
###Output
_____no_output_____
###Markdown
Clean upDelete the service, image and model.
###Code
%%time
aks_service.delete()
image.delete()
model.delete()
###Output
_____no_output_____
Juliana2701w3.ipynb | ###Markdown
Feature vector for classification $X_c = [a, av, aa, C]$ $a \rightarrow$ angle; $av \rightarrow$ angular velocity; $aa \rightarrow$ angular acceleration; $C \rightarrow$ classification index Classification index $"c"$: $C = 0 \rightarrow$ normal gait; $C = 1 \rightarrow$ stair-ascent gait; $C = 2 \rightarrow$ stair-descent gait.
###Code
print a.shape, av.shape, aa.shape
len_xc = len(a)-2
Xcp = np.hstack(
(a[2:].reshape((len_xc,1)),
av[1:].reshape((len_xc,1))))
Xcp = np.hstack(
(Xcp.reshape((len_xc,2)),
aa.reshape((len_xc,1))))
Xcp = np.hstack(
(Xcp.reshape((len_xc,3)),
l_a[2:].reshape((len_xc,1))))
Xcp = np.hstack(
(Xcp.reshape((len_xc,4)),
l_av[1:].reshape((len_xc,1))))
Xcp = np.hstack(
(Xcp.reshape((len_xc,5)),
l_aa.reshape((len_xc,1))))
Xcp = np.hstack(
(Xcp.reshape((len_xc,6)),
pos_foot_r[2:].reshape((len_xc,1))))
Xcp = np.hstack(
(Xcp.reshape((len_xc,7)),
pos_foot_l[2:].reshape((len_xc,1))))
vz_r = velocities3d[1:,2] # velocity along the z axis
vz_l = l_velocities3d[1:,2] # velocity along the z axis
Xcp = np.hstack(
(Xcp.reshape((len_xc,8)),
vz_r.reshape((len_xc,1))))
Xcp = np.hstack(
(Xcp.reshape((len_xc,9)),
vz_l.reshape((len_xc,1))))
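# Note: an equivalent, more compact construction of the same 10-column matrix
# (using the arrays defined above) would be:
# Xcp = np.column_stack((a[2:], av[1:], aa, l_a[2:], l_av[1:], l_aa,
#                        pos_foot_r[2:], pos_foot_l[2:], vz_r, vz_l))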
###Output
_____no_output_____
###Markdown
Adding the classification column
###Code
C = (np.ones(len_xc)*0).reshape((len_xc,1))
Xc = np.hstack(
(Xcp.reshape((len_xc,10)),
C.reshape((len_xc,1))))
Xc.shape
## saving to a file in the <classifier_data> folder
from Data_Savior_J import save_it_now
save_it_now(Xc, "./classifier_data/walk3.data")
###Output
Saved to file
###Markdown
Checks for Nan
###Code
Nan = np.isnan(Xc)
Nan
###Output
_____no_output_____ |
nbs/15_examples.summarization.ipynb | ###Markdown
Summarization
###Code
#hide
from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, MarianConfig, AutoTokenizer, AutoConfig
from datasets import load_dataset, load_metric, concatenate_datasets
from fastai.text.all import *
from fastai.callback.wandb import *
import nltk
from fasthugs.learner import TransLearner, GeneratePreds
from fasthugs.data import TransformersTextBlock, TextGetter, KeyGetter, get_splits
from fasthugs.metrics import RougeScore
###Output
_____no_output_____
###Markdown
Setup
###Code
model_name = "t5-small"
max_len = 512
bs = 16
val_bs = bs*2
lr = 2e-5
#hide_output
# datasets = load_dataset("xsum")
train_ds = load_dataset("xsum", split='validation')
###Output
_____no_output_____
###Markdown
Dataloaders
###Code
#hide_output
tokenizer = AutoTokenizer.from_pretrained(model_name)
# splits = get_splits(datasets)
# train_ds = concatenate_datasets([datasets['train'], datasets['validation']])
splits = RandomSplitter()(train_ds)
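# helper below unpacks the 1-tuple samples produced by the datablock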
@ItemTransform
def untuple1(x):
return (*x[0], )
dblock = DataBlock(
blocks = [TransformersTextBlock(tokenizer=tokenizer, do_targets=True, with_labels=True)],
get_x=TextGetter('document', 'summary', prefix1='summarize: '),
item_tfms=untuple1,
splitter=IndexSplitter(splits[1]))
#hide
# dblock.summary(train_ds)
%%time
dls = dblock.dataloaders(train_ds, bs=bs, val_bs=bs*2, shuffle=True)
#hide
b = dls.one_batch()
b
#collapse
dls.show_batch(max_n=4)
#hide
# import wandb
# WANDB_NAME = f'{ds_name}-{model_name}'
# GROUP = f'{ds_name}-{model_name}-simple-{lr:.0e}'
# NOTES = f'finetuning {model_name} with RAdam lr={lr:.0e}'
# CONFIG = {}
# TAGS =[model_name, ds_name, 'radam']
#hide
# wandb.init(reinit=True, project="fasthugs", entity="fastai_community",
# name=WANDB_NAME, group=GROUP, notes=NOTES, tags=TAGS, config=CONFIG);
###Output
_____no_output_____
###Markdown
Training
###Code
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
learn = TransLearner(dls, model, loss_func=noop)
learn.add_cb(RougeScore(tokenizer));
#hide
learn.show_training_loop()
learn.validate()
learn.fit_one_cycle(2, 1e-4)
#hide
def display_validation_results(res):
m_names = ['train_loss', 'valid_loss']+[m.name for m in learn.metrics]
res_df = pd.DataFrame.from_dict({k:[v] for k,v in zip(m_names, res)})
display_df(res_df)
%%time
res = learn.validate()
display_validation_results(res)
###Output
_____no_output_____
###Markdown
So far we computed predictions using a single forward pass, so the token generated at timestep $t$ has access to reference tokens $0:t-1$. But at inference time we generate autoregressively: previously generated tokens are used to generate the next one. Let's evaluate the model with this more realistic procedure. This can be done by adding the `GeneratePreds` callback:
###Code
%%time
res = learn.validate(cbs=GeneratePreds())
display_validation_results(res)
sample = train_ds[0]
document_text = ' '.join(sample['document'].split('\n'))
print(f"Document:\n{document_text}")
print(f"\nReference summary: {sample['summary']}")
inp = tokenizer('summarize: '+sample['document'], return_tensors='pt')
pred = learn.generate(inp['input_ids'].to(dls.device))
out = tokenizer.decode(pred[0].cpu(), skip_special_tokens=True)
print(f"\nPredicted summary: {out}")
###Output
Document:
The country's consumer watchdog has taken Apple to court for false advertising because the tablet computer does not work on Australia's 4G network. Apple's lawyers said they were willing to publish a clarification. However the company does not accept that it misled customers. The Australian Competition and Consumer Commission (ACCC) said on Tuesday: "Apple's recent promotion of the new 'iPad with wi-fi + 4G' is misleading because it represents to Australian consumers that the product can, with a sim card, connect to a 4G mobile data network in Australia, when this is not the case." The watchdog then lodged a complaint at the Federal Court in Melbourne. At a preliminary hearing, Apple lawyer Paul Anastassiou said Apple had never claimed the device would work fully on the current 4G network operated by Telstra. Apple says the new iPad works on what is globally accepted to be a 4G network. The matter will go to a full trial on 2 May. The Apple iPad's third version went on sale earlier this month, with Australia the first country where it was available. Shoppers lined up by the hundreds at Apple stores on opening day and the company said it had been its strongest iPad launch to date. The ACCC said it was seeking an injunction on sales as well as a financial penalty against Apple, corrective advertising and refunds to consumers. On its website, Apple does state that 4G LTE is only supported on selected networks in the US and Canada.
Reference summary: US technology firm Apple has offered to refund Australian customers who felt misled about the 4G capabilities of the new iPad.
Predicted summary: Apple has filed a complaint against the Australian consumer watchdog for misleading advertising on its new iPad
courses/machine_learning/deepdive/06_structured/6_deploy.ipynb | ###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/trained_model; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight
fi
###Output
_____no_output_____
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
_____no_output_____
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses is the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
_____no_output_____
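For reference, a small sketch of pulling the numbers out of the response once it comes back; this assumes the {"predictions": [{"key": [...], "predictions": [...]}]} shape shown in a sample output later in this notebook:
# parse the online-prediction response into (key, predicted weight) pairs
import json
result = json.loads(response.content.decode('utf-8'))
for p in result.get('predictions', []):
    print(p['key'][0], round(p['predictions'][0], 2))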
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you need to make thousands to millions of predictions. Create a file with one instance per line and submit using gcloud.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ml-engine jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
###Output
_____no_output_____
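Once the batch job finishes, its results are written to the output path. A minimal follow-up sketch in the notebook's own shell-magic style (the prediction.results* filename pattern is an assumption about the service's output layout, not something verified here):
# list and read the batch prediction results after the job completes
!gsutil ls gs://{BUCKET}/babyweight/batchpred/outputs/
!gsutil cat gs://{BUCKET}/babyweight/batchpred/outputs/prediction.results*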
###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
fi
###Output
_____no_output_____
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
_____no_output_____
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses is the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
_____no_output_____
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you need to make thousands to millions of predictions. Create a file with one instance per line and submit using gcloud.
###Code
%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ml-engine jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
###Output
_____no_output_____
###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/trained_model; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
fi
###Output
_____no_output_____
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ai-platform models delete ${MODEL_NAME}
gcloud ai-platform models create ${MODEL_NAME} --regions $REGION
gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
_____no_output_____
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses is the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
_____no_output_____
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you need to make thousands to millions of predictions. Create a file with one instance per line and submit using gcloud.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
###Output
_____no_output_____
###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of TensorFlow is installed.
!pip freeze | grep tensorflow==2.1
# change these to try this notebook out
BUCKET = 'cloud-trainingdemosml'
PROJECT = 'mlops-content'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/trained_model; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
fi
###Output
Creating gs://cloud-trainingdemosml/...
ServiceException: 409 A Cloud Storage bucket named 'cloud-trainingdemosml' already exists. Try another name. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
Copying gs://cloud-training-demos/babyweight/trained_model/checkpoint...
Copying gs://cloud-training-demos/babyweight/trained_model/export/exporter/1529355466/variables/variables.index...
Copying gs://cloud-training-demos/babyweight/trained_model/eval/events.out.tfevents.1529348264.cmle-training-master-a137ac0fff-0-9q8r4...
Copying gs://cloud-training-demos/babyweight/trained_model/events.out.tfevents.1529347276.cmle-training-master-a137ac0fff-0-9q8r4...
Copying gs://cloud-training-demos/babyweight/trained_model/export/exporter/1529355466/saved_model.pb...
Copying gs://cloud-training-demos/babyweight/trained_model/export/exporter/1529355466/variables/variables.data-00000-of-00001...
Copying gs://cloud-training-demos/babyweight/trained_model/graph.pbtxt...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-342784.data-00000-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-342784.data-00001-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-342784.data-00002-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-342784.index...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-342784.meta...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-376661.data-00001-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-376661.data-00000-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-376661.data-00002-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-376661.index...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-376661.meta...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-390628.data-00000-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-390628.data-00001-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-390628.data-00002-of-00003...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-390628.index...
Copying gs://cloud-training-demos/babyweight/trained_model/model.ckpt-390628.meta...
\ [22/22 files][ 6.5 MiB/ 6.5 MiB] 100% Done
Operation completed over 22 objects/6.5 MiB.
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="v1"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ai-platform models delete ${MODEL_NAME}
gcloud ai-platform models create ${MODEL_NAME} --regions $REGION
#gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
Deleting and deploying babyweight ml_on_gcp from gs://cloud-trainingdemosml/babyweight/trained_model/export/exporter/1529355466/ ... this will take a few minutes
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses is the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'v1'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
b'{"predictions": [{"key": ["b1"], "predictions": [7.740230083465576]}, {"key": ["g1"], "predictions": [7.247548580169678]}, {"key": ["b2"], "predictions": [6.182091236114502]}, {"key": ["u1"], "predictions": [6.13692569732666]}]}'
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you need to make thousands to millions of predictions. Create a file with one instance per line and submit using gcloud.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=v1
! gcloud ai-platform jobs stream-logs babypred_210610_053858
###Output
_____no_output_____
###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/trained_model; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight
fi
###Output
_____no_output_____
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ai-platform models delete ${MODEL_NAME}
gcloud ai-platform models create ${MODEL_NAME} --regions $REGION
gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
_____no_output_____
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses are the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
_____no_output_____
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you thousands to millions of predictions.Create a file with one instance per line and submit using gcloud.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
###Output
_____no_output_____
###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of TensorFlow is installed.
!pip freeze | grep tensorflow==2.1
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/trained_model; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
fi
###Output
_____no_output_____
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ai-platform models delete ${MODEL_NAME}
gcloud ai-platform models create ${MODEL_NAME} --regions $REGION
gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
_____no_output_____
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses is the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
_____no_output_____
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you need to make thousands to millions of predictions. Create a file with one instance per line and submit using gcloud.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
###Output
_____no_output_____
###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/trained_model; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
fi
###Output
_____no_output_____
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
_____no_output_____
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses is the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
_____no_output_____
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you need to make thousands to millions of predictions. Create a file with one instance per line and submit using gcloud.
###Code
%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ml-engine jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
###Output
_____no_output_____
###Markdown
Deploying and predicting with model This notebook illustrates: Deploying model Predicting with model
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7'
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight
fi
###Output
_____no_output_____
###Markdown
Deploy trained model Deploying the trained model to act as a REST web service is a simple gcloud call.
###Code
%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
###Output
_____no_output_____
###Markdown
Use model to predict (online prediction) Send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses is the order of the instances.
###Code
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = 'babyweight'
MODEL_VERSION = 'ml_on_gcp'
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = 'https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict' \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {'Authorization': 'Bearer ' + token }
data = {
'instances': [
{
'key': 'b1',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Single(1)',
'gestation_weeks': 39
},
{
'key': 'g1',
'is_male': 'False',
'mother_age': 29.0,
'plurality': 'Single(1)',
'gestation_weeks': 38
},
{
'key': 'b2',
'is_male': 'True',
'mother_age': 26.0,
'plurality': 'Triplets(3)',
'gestation_weeks': 39
},
{
'key': 'u1',
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
},
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
###Output
_____no_output_____
###Markdown
The predictions for the four instances were: 7.66, 7.22, 6.32 and 6.19 pounds respectively when I ran it (your results might be different). Use model to predict (batch prediction) Batch prediction is commonly used when you need to make thousands to millions of predictions. Create a file with one instance per line and submit using gcloud.
###Code
%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ml-engine jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT --region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight --version=ml_on_gcp
###Output
_____no_output_____
Notebooks/Notebook 1 - Data Exploration & Cleaning.ipynb | ###Markdown
A Formula for Success: Predicting How to Win in Baseball Notebook 1 I. The ProblemBaseball is a sport of numbers and statistics. Because of this, there are all sorts of measurements of what happened in a game, as well as team and specific player performance. Over the years, and made more popular by fantasy sports and books/movies such as Moneyball, statistics have gotten even better at determining the value of players. These advanced statistics are commonly referred to as sabermetrics. But what is the formula for a team's success? Obviously, it's winning, because a team must win in order to become a championship contender. And obviously, winning is accomplished by scoring more runs than your opponent. But if you were a general manager of a baseball franchise, you would want to go beyond that to determine a more precise equation to field a consistently winning team. Then, based on this formula, a general manager should be able to determine a player's value in terms of contributing to a team's winning chances. Research QuestionsAs such, this project focuses on answering the following questions:* What features most significantly impact the winning percentage of a given Major League Baseball (MLB) team?* For those given features, to what extent do they impact a team's winning percentage?* Based on this, who has been the most valuable player in contributing to a team's winning percentage from 2015-2020? By answering these questions, a general manager can build a roster to maximize its odds of winning. As such, a general manager could target particular players in free agency or through trades to acquire them. Furthermore, since spending in baseball is limited based on what a particular team can afford in its market, as well as a tax beyond a particular salary threshold, a general manager could use this formula to determine if a given player is an improvement over a player currently rostered at that position on the team. This particular notebook will work to answer the first two of the research questions. II. Data AcquisitionTo answer these questions, we will need to find data over the last few seasons from MLB. We will need both team statistics to determine the equation for winning and player statistics to determine who has contributed the most to winning. To acquire this data, we will use the MLB statistics repository Fangraphs.com. This data will be pulled specifically from the [database found here](https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=y&type=8&season=2020&month=0&season1=2020&ind=0). We will use multiple requests, then export each of them. For the team stats, you click on the "Team Stats" panel, select multiple seasons, split them, hit submit, then export. Do this for the Batting, Pitching, and Fielding tabs. We split the seasons because a given team will vary in its performance--and its roster--from year to year, adding to our sample size. To build an adequate sample size, we will pull data for each season beginning in 2006. For the individual stats, you click on the "Player Stats" panel; because we are viewing individual player stats over the past few seasons, we will select "Multiple Seasons" from 2015 to 2020. While the years selected for this sample seem arbitrary, they are not. Baseball is known to have time periods that stylize play, commonly known as eras. From the late 1980s into most of the 2000s, power hitting had a surprising jolt due to the prevalence of steroids and other performance-enhancing drugs.
Long-standing home run records were broken multiple times during this period. The 2006 season is when MLB began to test and discipline players for performance-enhancing drugs, and therefore is a good starting point for our dataset. This process can easily be repeated by others who wish to replicate or customize the data pulled for their own usage. To begin, we will need to pull the various dataframes, then combine them into a single dataframe.
###Code
# import needed tools and libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Retrieve CSVs into dataframes
team_bat = pd.read_csv(r'~/Github/DATA601_FinalProject/Data/TeamStats_Batting.csv')
team_pitch = pd.read_csv(r'~/Github/DATA601_FinalProject/Data/TeamStats_Pitching.csv')
team_field = pd.read_csv(r'~/Github/DATA601_FinalProject/Data/TeamStats_Fielding.csv')
# Update team names with year to prevent confusion (So 2019 Orioles and 2020 Orioles are clear in Team column)
team_bat['TeamYear'] = team_bat['Season'].astype(str) + '_' + team_bat['Team']
team_pitch['TeamYear'] = team_pitch['Season'].astype(str) + '_' + team_pitch['Team']
team_field['TeamYear'] = team_field['Season'].astype(str) + '_' + team_field['Team']
# Move new TeamYear column to front
team_bat = team_bat[['TeamYear'] + [col for col in team_bat.columns if col != 'TeamYear']]
team_pitch = team_pitch[['TeamYear'] + [col for col in team_pitch.columns if col != 'TeamYear']]
team_field = team_field[['TeamYear'] + [col for col in team_field.columns if col != 'TeamYear']]
# Merge dataframes
teamstats = pd.merge(team_bat, team_field, how = 'outer', on = 'TeamYear')
teamstats = pd.merge(teamstats, team_pitch, how = 'outer', on = 'TeamYear')
# Drop repeat columns and rearrange columns
teamstats.drop(['Season_x', 'Team_x', 'Season_y', 'Team_y'], axis=1, inplace = True)
move_cols = ['TeamYear', 'Season', 'Team']
teamstats = teamstats[move_cols + [col for col in teamstats.columns if col not in move_cols]]
# Check dataframe
teamstats
# Save dataframe to CSV
teamstats.to_csv(r'~/Github/DATA601_FinalProject/Data/TeamStats_Combined.csv', index = None)
###Output
_____no_output_____
###Markdown
III. Data Exploration - Part 1Why part one? Well, as you will see below, we will actually need to do some data cleaning and feature engineering in order to properly explore the data.
###Code
# Copy teamstats dataframe by pulling from newly saved CSV
df = pd.read_csv(r'~/Github/DATA601_FinalProject/Data/TeamStats_Combined.csv')
df.head()
# Look at the stats of the data
df.describe()
# Look at details of the data
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 450 entries, 0 to 449
Data columns (total 64 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 TeamYear 450 non-null object
1 Season 450 non-null int64
2 Team 450 non-null object
3 G_x 450 non-null int64
4 PA 450 non-null int64
5 HR 450 non-null int64
6 R 450 non-null int64
7 RBI 450 non-null int64
8 SB 450 non-null int64
9 BB% 450 non-null object
10 K% 450 non-null object
11 ISO 450 non-null float64
12 BABIP_x 450 non-null float64
13 AVG 450 non-null float64
14 OBP 450 non-null float64
15 SLG 450 non-null float64
16 wOBA 450 non-null float64
17 wRC+ 450 non-null int64
18 EV_x 180 non-null float64
19 BsR 450 non-null float64
20 Off 450 non-null float64
21 Def_x 450 non-null float64
22 WAR_x 450 non-null float64
23 Inn 450 non-null int64
24 rSZ 450 non-null int64
25 rCERA 450 non-null int64
26 rSB 450 non-null int64
27 rGDP 450 non-null int64
28 rARM 450 non-null int64
29 rGFP 450 non-null int64
30 rPM 450 non-null int64
31 rTS 330 non-null float64
32 DRS 450 non-null int64
33 BIZ 450 non-null int64
34 Plays 450 non-null int64
35 RZR 450 non-null float64
36 OOZ 450 non-null int64
37 FSR 300 non-null float64
38 FRM 390 non-null float64
39 ARM 450 non-null float64
40 DPR 450 non-null float64
41 RngR 450 non-null float64
42 ErrR 450 non-null float64
43 UZR 450 non-null float64
44 UZR/150 450 non-null float64
45 Def_y 450 non-null float64
46 W 450 non-null int64
47 L 450 non-null int64
48 SV 450 non-null int64
49 G_y 450 non-null int64
50 GS 450 non-null int64
51 IP 450 non-null float64
52 K/9 450 non-null float64
53 BB/9 450 non-null float64
54 HR/9 450 non-null float64
55 BABIP_y 450 non-null float64
56 LOB% 450 non-null object
57 GB% 450 non-null object
58 HR/FB 450 non-null object
59 EV_y 180 non-null float64
60 ERA 450 non-null float64
61 FIP 450 non-null float64
62 xFIP 450 non-null float64
63 WAR_y 450 non-null float64
dtypes: float64(32), int64(25), object(7)
memory usage: 225.1+ KB
###Markdown
At first glance, we see a mixture of different datatypes, including strings/objects, integers, and floats. However, aside from the "TeamYear" and "Team" columns, all of our data should be numerical. This means that the other columns labeled as objects are incorrectly categorized. If we look back at the header (first five rows) of the table, we see that this is due to the percentage sign (%) used in those columns. We will need to correct this during our data cleaning. We also see that some columns have many null values. We will need to clean these as well.
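Before cleaning, a quick sketch to confirm both observations programmatically (just standard pandas calls on the df defined above):
# which columns were read in as objects, and which columns contain nulls
print(df.select_dtypes(include='object').columns.tolist())
null_counts = df.isnull().sum()
print(null_counts[null_counts > 0])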
###Code
# Check the number of unique values
df.nunique(axis = 0)
###Output
_____no_output_____
###Markdown
One thing that stands out is that there are 31 unique entries for the Team feature. Considering there are only 30 MLB teams, we know that there must be something we need to alter here. No teams/franchises have moved; however, the Tampa Bay Devil Rays did rename themselves the Rays for the 2008 season. That could be the cause. Let's check if that is what we are seeing here.
###Code
# Look for team name including string 'Rays'
df[df['Team'].str.contains('Rays')]
###Output
_____no_output_____
###Markdown
As we see above, the Tampa Bay franchise has two names in our dataset: the Devil Rays and the Rays. We will need to clean this up for simplicity in the data cleaning section. Since our target variable would be wins, let's take a look at it more closely.
###Code
# Box plot for wins
sns.boxplot(y = df['W'])
###Output
_____no_output_____
###Markdown
We see a ton of outliers here, which is something worth exploring. Let's view this distribution in a histogram.
###Code
# Histogram of wins
x = df['W']
num_bins = 10
n, bins, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
# Histogram settings
plt.xlabel('Wins')
plt.ylabel('Frequency')
plt.title('Histogram of Frequency of Wins in a Season')
plt.show()
###Output
_____no_output_____
###Markdown
We would expect to see a rather normal distribution here, and we can see that the portion of the histogram between approximately 50 and 105 wins appears normally distributed. So what could explain this? It is very likely the result of the shortened 2020 season. The standard MLB season is 162 games; due to the COVID-19 pandemic, the 2020 season was shortened to only 60 games. This leaves us with a decision to make: do we remove the 2020 season altogether, or do we still keep it as part of our dataset? III. Data Cleaning - Part 1The solution we will use is to view wins as a percentage of total games played rather than raw total wins. This is otherwise known as winning percentage. We will need to create this as a new feature.
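For example, a team that wins 100 of 162 games in a normal season and a team that wins 37 of 60 games in the shortened 2020 season come out essentially even on this measure (100/162 ≈ 0.617 and 37/60 ≈ 0.617), which is exactly the comparability we want across season lengths.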
###Code
# Create new Winning Percentage column
# Win percentage = Wins / Games Started
df['W%'] = (df['W']/df['GS'])
# Check with header
df[['Team', 'W', 'L', 'W%']]
###Output
_____no_output_____
###Markdown
Now let's check the box plot and histogram again.
###Code
# Box plot for win percentage
sns.boxplot(y = df['W%'])
# Histogram of win percentage
x = df['W%']
num_bins = 10
n, bins, patches = plt.hist(x, num_bins, facecolor='blue', alpha=0.5)
# Histogram settings
plt.xlabel('Win Percentage')
plt.ylabel('Frequency')
plt.title('Histogram of Frequency of Win %')
plt.savefig('WPHistogram.png', bbox_inches = 'tight')
plt.show()
###Output
_____no_output_____
###Markdown
We see that the win percentage is much more normally distributed, which shows that the shortened 2020 season was the issue. However, by extension, we can assume that other columns with raw total data, as opposed to ratio data, are similarly affected by the shortened season. This means that we must update these columns as well prior to exploring the data.
###Code
# Create list of columns to be converted
col_change = ['PA', 'HR', 'R', 'RBI', 'SB', 'Inn', 'BIZ', 'Plays','IP']
# Rewrite columns with their "per game" data
df[col_change] = df[col_change].div(df['GS'].values, axis = 0)
# Check work
df.head(10)
###Output
_____no_output_____
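As a quick aside (this check is not part of the original notebook), per-season averages are an easy way to confirm that the per-game conversion put the 60-game 2020 season on the same scale as the 162-game seasons:
# per-game averages should now be comparable across season lengths
df.groupby('Season')[['HR', 'R', 'PA']].mean()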
###Markdown
Thankfully, this worked, as we see our data has been converted to a per-game basis. We will also need to change the string columns with percentages in them to floats. We will do this next.
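For instance, a value stored as the string '8.5%' should come out as the float 0.085: strip the '%' character, cast to float, and divide by 100.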
###Code
# Create list of columns with percentages to convert to floats
col_change2 = ['BB%', 'K%', 'LOB%', 'GB%', 'HR/FB']
# Convert columns
df[col_change2] = df[col_change2].replace('%', '', regex = True).astype('float')/100
# Check results
df.head()
###Output
_____no_output_____
###Markdown
By cross-referencing with the previous header, we see that we have successfully converted those percentage-objects to floats. Another large decision we will need to make is what to do with our null values. We saw that some of our columns have null values for greater than 10% of the rows. Some are missing more than half. Given that all columns with null values are missing at least 60 of the 450 rows, each of these columns will be dropped. We can also drop the wins ('W') and losses ('L') columns because they are unnecessary now that we have created win percentage.
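As a design note, dropping every column containing any nulls is the bluntest option; an alternative sketch (not what this notebook does) would keep columns that are non-null in at least, say, 90% of rows using pandas' thresh parameter:
# alternative: keep only columns with at least 90% non-null values
df_alt = df.dropna(axis=1, thresh=int(0.9 * len(df)))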
###Code
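# (optional sanity check, added for illustration) list which columns still
# contain nulls and how many rows each is missing before we drop them
null_counts = df.isnull().sum()
print(null_counts[null_counts > 0])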
# Drop columns with null values
df.dropna(axis=1, inplace = True)
# Drop wins and losses
df.drop(['W', 'L'], axis=1, inplace = True)
###Output
_____no_output_____
###Markdown
Now that we've dropped those, let's also update the Tampa Bay Rays/Devil Rays. We will rename the Devil Rays to the Rays (just as in real life), so that it does not appear that there are 31 franchises in MLB.
###Code
# Replace Devil Rays with Rays
df['Team'].replace('Devil Rays', 'Rays', inplace = True)
# Correct TeamYear column as well
df['TeamYear'] = df['Season'].astype(str) + '_' + df['Team']
# Check work
df[df['Team'].str.contains('Rays')]
###Output
_____no_output_____
###Markdown
IV. Data Exploration - Part 2 We can now proceed with more data exploration. Since our target variable is win percentage, let's take a look at how the win percentages of different teams compare.
###Code
# Graph win percentage by team
plt.figure(figsize = (30, 20))
aplot = sns.boxplot(x = 'Team', y = 'W%', data = df)
aplot.axes.set_title('Box Plot of Win Percentage by MLB Team, 2006-2020', fontsize=30)
aplot.set_xlabel('Team', fontsize=20)
aplot.set_ylabel('Win Percentage (W%)', fontsize=20)
aplot.figure.savefig('BoxPlotWPTeam.png', bbox_inches = 'tight')
###Output
_____no_output_____
###Markdown
Some interesting trends we see here:
* Most teams have had a wide range of win percentages, indicating they had some good seasons and some bad seasons.
* The Yankees, Dodgers, and Cardinals were more consistent, as they have smaller spreads than most teams.
  * They also were consistently good teams, as they rarely had a win percentage below 0.5. The Dodgers even had an outlier season with a win percentage above 0.7.
* The Astros, Rays, Tigers, and Orioles have very large spreads, indicating their season win percentage has had more variability.
  * As an Orioles fan, I can sadly vouch for this. From 2006-2011, watching the Orioles was painful indeed, but 2012-2016 was much better until the team regressed again in 2017 and beyond.

Let's look at the box plots and histograms of more features.
###Code
# Graphs settings for multiple features
plt.figure(figsize=(20, 15))
# Graphs of features
plt.subplot(3,3,1)
sns.boxplot(y = df['PA'], palette = 'Set1')
plt.subplot(3,3,2)
sns.boxplot(y = df['HR'], palette = 'Set2')
plt.subplot(3,3,3)
sns.boxplot(y = df['R'], palette = 'Set1_r')
plt.subplot(3,3,4)
sns.boxplot(y = df['RBI'], palette = 'Set2_r')
plt.subplot(3,3,5)
sns.boxplot(y = df['SB'], palette = 'cividis')
plt.subplot(3,3,6)
sns.boxplot(y = df['BB%'], palette = 'winter_r')
plt.subplot(3,3,7)
sns.boxplot(y = df['K%'],palette = 'Spectral')
plt.subplot(3,3,8)
sns.boxplot(y = df['ISO'], palette = 'cool')
plt.subplot(3,3,9)
sns.boxplot(y = df['BABIP_x'], palette = 'coolwarm')
# Graphs settings for multiple features
plt.figure(figsize=(20, 15))
# Graphs of features
plt.subplot(3,3,1)
sns.distplot(df['PA'], bins = 10)
plt.subplot(3,3,2)
sns.distplot(df['HR'], bins = 10)
plt.subplot(3,3,3)
sns.distplot(df['R'], bins = 10)
plt.subplot(3,3,4)
sns.distplot(df['RBI'], bins = 10)
plt.subplot(3,3,5)
sns.distplot(df['SB'], bins = 10)
plt.subplot(3,3,6)
sns.distplot(df['BB%'], bins = 10)
plt.subplot(3,3,7)
sns.distplot(df['K%'],bins = 10)
plt.subplot(3,3,8)
sns.distplot(df['ISO'], bins = 10)
plt.subplot(3,3,9)
sns.distplot(df['BABIP_x'], bins = 10)
###Output
_____no_output_____
###Markdown
The box plots and histograms above reveal a number of interesting trends. Let's repeat the same views for the rate and value statistics.
###Code
# Graphs settings for multiple features
plt.figure(figsize=(20, 15))
# Graphs of features
plt.subplot(3,3,1)
sns.boxplot(y = df['AVG'], palette = 'Set1')
plt.subplot(3,3,2)
sns.boxplot(y = df['OBP'], palette = 'Set2')
plt.subplot(3,3,3)
sns.boxplot(y = df['SLG'], palette = 'Set1_r')
plt.subplot(3,3,4)
sns.boxplot(y = df['wOBA'], palette = 'Set2_r')
plt.subplot(3,3,5)
sns.boxplot(y = df['wRC+'], palette = 'cividis')
plt.subplot(3,3,6)
sns.boxplot(y = df['BsR'], palette = 'winter_r')
plt.subplot(3,3,7)
sns.boxplot(y = df['Off'],palette = 'Spectral')
plt.subplot(3,3,8)
sns.boxplot(y = df['Def_x'], palette = 'cool')
plt.subplot(3,3,9)
sns.boxplot(y = df['WAR_x'], palette = 'coolwarm')
# Graphs settings for multiple features
plt.figure(figsize=(20, 15))
# Graphs of features
plt.subplot(3,3,1)
sns.distplot(df['AVG'], bins = 10)
plt.subplot(3,3,2)
sns.distplot(df['OBP'], bins = 10)
plt.subplot(3,3,3)
sns.distplot(df['SLG'], bins = 10)
plt.subplot(3,3,4)
sns.distplot(df['wOBA'], bins = 10)
plt.subplot(3,3,5)
sns.distplot(df['wRC+'], bins = 10)
plt.subplot(3,3,6)
sns.distplot(df['BsR'], bins = 10)
plt.subplot(3,3,7)
sns.distplot(df['Off'],bins = 10)
plt.subplot(3,3,8)
sns.distplot(df['Def_x'], bins = 10)
plt.subplot(3,3,9)
sns.distplot(df['WAR_x'], bins = 10)
###Output
_____no_output_____
###Markdown
Let's view correlations with win percentage. We will also use a correlation heatmap to display this.
###Code
# Check correlations with win percentage
cor_matrix = df.corr()
print(cor_matrix['W%'].sort_values(ascending = False))
# Create object for graph
win_cor = df.corr()
# Remove half of heatmap
mask = np.zeros(win_cor.shape, dtype = bool)
mask[np.triu_indices(len(mask))] = True
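# (np.triu_indices marks the upper triangle, which mirrors the lower
# triangle, so those redundant cells are hidden from the heatmap)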
# Graph correlation heatmap
plt.subplots(figsize = (25,20))
cor_heatmap = sns.heatmap(win_cor, xticklabels = win_cor.columns, yticklabels = win_cor.columns, cmap = 'coolwarm', mask = mask)
cor_heatmap.set_title('Correlation Heatmap of Original Dataset', fontdict={'fontsize':18}, pad=12)
plt.savefig('Heatmap1.png', bbox_inches = 'tight')
###Output
_____no_output_____
###Markdown
We see a number of strong correlations between features here, such as slugging percentage (SLG) with runs (R) and runs batted in (RBI), which makes sense. If you hit the ball well and get on base, you'll score more runs. We see a strong negative correlation between strikeouts per 9 innings (K/9) and both balls in zone (BIZ) and plays made (Plays). This also makes sense, as a team that strikes batters out will have to field fewer balls hit by batters. In terms of correlations with win percentage (W%), we see the strongest positive correlation with offensive wins above replacement (WAR_x). This is expected, because if your players are batting farther above a given level (an average replacement player), you will likely have more wins. We see the strongest negative correlation with earned run average (ERA). This is also expected, because if you allow opposing teams to score fewer runs against you, you have a better chance of outscoring them for a win. V. Data Cleaning - Part 2 Given that we have explored the data as well as some correlations, we should start to remove some features to assist us in developing our model. We will do this in a few ways: first, we will remove features that describe elements of the game rather than aspects which contribute to scoring or a lack thereof. As an example, plate appearances (PA) is a feature that describes how many at-bats a team has. Its positive correlation with win percentage would suggest that with more plate appearances, you are more likely to win. However, correlation does not equal causation. You earn more plate appearances through getting on base and avoiding outs. That's why plate appearances also has a strong positive correlation with batting average (AVG) and on base percentage (OBP). Thus, plate appearances is likely not useful in determining how a team wins, but is a byproduct of other features which contribute to winning. Similarly, earned run average (ERA) is not helpful in explaining how to stymie an opponent's scoring; it is a metric of the extent to which an opponent is able or unable to score against that team. We should remove features like plate appearances and earned run average, as well as other features that explain elements of the game, such as innings played (Inn) and innings pitched (IP).
###Code
# Drop unnecessary columns
df.drop(['PA', 'ERA', 'FIP', 'Inn', 'IP', 'LOB%'], axis=1, inplace = True)
###Output
_____no_output_____
###Markdown
The second way we will narrow down the dataset is through recursive feature elimination (RFE). RFE fits a model on all of the features, ranks each feature's importance, and then recursively removes the weakest features and refits until only the requested number remains. This process of elimination lets us more easily identify the features that maximize the strength of the model while minimizing its cost.
###Code
# import tools
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Create dataset for Y
y_data = df['W%']
# Create dataset for X, removing target variable and non-numerical features
x_data = df.drop(['W%', 'TeamYear', 'Team'], axis = 1)
# Separate data into train and test groups using 25% of samples in test group
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size = 0.25, random_state = 100)
# Report number of samples in each group
print('The number of training samples is:', x_train.shape[0])
print('The number of test samples is:', x_test.shape[0])
# Create model for RFE
lr = LinearRegression()
# Fit the model for RFE
lr.fit(x_train, y_train)
# Use Recursive Feature Elimination
rfe = RFE(lr, n_features_to_select=15)
rfe = rfe.fit(x_train, y_train)
# Print list of features that show if they are in the top 15 features with boolean test
print('Is a given feature in the best 15 as determined by RFE?:')
print(list(zip(x_train.columns, rfe.support_, rfe.ranking_)))
###Output
The number of training samples is: 337
The number of test samples is: 113
Is a given feature in the best 15 as determined by RFE?:
[('Season', False, 15), ('G_x', False, 35), ('HR', True, 1), ('R', False, 2), ('RBI', False, 4), ('SB', False, 12), ('BB%', True, 1), ('K%', False, 5), ('ISO', True, 1), ('BABIP_x', True, 1), ('AVG', True, 1), ('OBP', True, 1), ('SLG', True, 1), ('wOBA', True, 1), ('wRC+', False, 20), ('BsR', False, 25), ('Off', False, 26), ('Def_x', False, 21), ('WAR_x', False, 14), ('rSZ', False, 30), ('rCERA', False, 16), ('rSB', False, 10), ('rGDP', False, 31), ('rARM', False, 27), ('rGFP', False, 19), ('rPM', False, 28), ('DRS', False, 29), ('BIZ', True, 1), ('Plays', True, 1), ('RZR', True, 1), ('OOZ', False, 34), ('ARM', False, 18), ('DPR', False, 22), ('RngR', False, 23), ('ErrR', False, 24), ('UZR', False, 17), ('UZR/150', False, 9), ('Def_y', False, 33), ('SV', False, 8), ('G_y', False, 32), ('GS', False, 11), ('K/9', False, 6), ('BB/9', False, 3), ('HR/9', True, 1), ('BABIP_y', True, 1), ('GB%', True, 1), ('HR/FB', True, 1), ('xFIP', False, 7), ('WAR_y', False, 13)]
###Markdown
Let's make this more presentable by identifying the 15 columns selected by RFE.
###Code
# Best 15 features as determined by RFE
sig_cols = x_train.columns[rfe.support_]
sig_cols
###Output
_____no_output_____
###Markdown
Now that we have identified the 15 features we will examine further in the modeling notebook, let's turn them into a new dataset and save it. We should also include the target variable and the identifying features.
###Code
df2 = df[['TeamYear', 'Season', 'Team', 'W%', 'HR', 'BB%', 'ISO', 'BABIP_x', 'AVG', 'OBP', 'SLG', 'wOBA', 'BIZ',
'Plays', 'RZR', 'HR/9', 'BABIP_y', 'GB%', 'HR/FB']]
df2.head()
# Save cleaned dataset to CSV
df2.to_csv(r'~/Github/DATA601_FinalProject/Data/TeamStats_Combined_Cleaned.csv', index = None)
###Output
_____no_output_____ |
appyters/COSMIC_Mutations_Harmonizome_ETL/COSMIC (Mutations).ipynb | ###Markdown
Harmonizome ETL: Catalogue of Somatic Mutations In Cancer (COSMIC) - Mutations. Created by: Charles Dai. Credit to: Moshe Silverstein. Data Source: https://cancer.sanger.ac.uk/cosmic/download
###Code
# appyter init
from appyter import magic
magic.init(lambda _=globals: _())
import sys
import os
from datetime import date
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import harmonizome.utility_functions as uf
import harmonizome.lookup as lookup
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Notebook Information
###Code
print('This notebook was run on:', date.today(), '\nPython version:', sys.version)
###Output
_____no_output_____
###Markdown
Initialization
###Code
%%appyter hide_code
{% do SectionField(
name='data',
title='Upload Data',
img='load_icon.png'
) %}
%%appyter code_eval
{% do DescriptionField(
name='description',
text='The example below was sourced from <a href="https://cancer.sanger.ac.uk/cosmic/download" target="_blank">cancer.sanger.ac.uk/cosmic</a>. The download requires a login so clicking on the example may not work, in which case it should be downloaded directly from the source website.',
section='data'
) %}
{% set df_file = FileField(
constraint='.*\.tsv.gz$',
name='mutation_data',
label='Mutation Data (tsv.gz)',
default='CosmicCompleteTargetedScreensMutantExport.tsv.gz',
examples={
'CosmicCompleteTargetedScreensMutantExport.tsv.gz': 'https://cancer.sanger.ac.uk/cosmic/file_download/GRCh38/cosmic/v91/CosmicCompleteTargetedScreensMutantExport.tsv.gz'
},
section='data'
) %}
###Output
_____no_output_____
###Markdown
Load Mapping Dictionaries
###Code
symbol_lookup, geneid_lookup = lookup.get_lookups()
###Output
_____no_output_____
###Markdown
Output Path
###Code
output_name = 'cosmic_mutations'
path = 'Output/COSMIC-Mutations'
if not os.path.exists(path):
os.makedirs(path)
###Output
_____no_output_____
###Markdown
Load Data
###Code
%%appyter code_exec
df = pd.read_csv(
{{df_file}},
sep='\t', usecols=['Gene name', 'Sample name'], index_col=0)
df.head()
df.shape
###Output
_____no_output_____
###Markdown
Pre-process Data Group Mutations of Same Gene Together
###Code
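# COSMIC gene names can carry a transcript suffix (e.g. "TP53_ENST00000269305";
# an assumption about this export's format), so splitting on '_' keeps just
# the gene symbol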
df.index = df.index.map(lambda s: s.split('_')[0])
df.index.name = 'Gene Symbol'
df.columns = ['Sample Name']
df.head()
###Output
_____no_output_____
###Markdown
Filter Data Map Gene Symbols to Up-to-date Approved Gene Symbols
###Code
df = uf.map_symbols(df, symbol_lookup, remove_duplicates=True)
df.shape
###Output
_____no_output_____
###Markdown
Analyze Data Create Binary Matrix
###Code
binary_matrix = uf.binary_matrix(df)
binary_matrix.head()
binary_matrix.shape
uf.save_data(binary_matrix, path, output_name + '_binary_matrix',
compression='npz', dtype=np.uint8)
###Output
_____no_output_____
###Markdown
Create Gene List
###Code
gene_list = uf.gene_list(binary_matrix, geneid_lookup)
gene_list.head()
gene_list.shape
uf.save_data(gene_list, path, output_name + '_gene_list',
ext='tsv', compression='gzip', index=False)
###Output
_____no_output_____
###Markdown
Create Attribute List
###Code
attribute_list = uf.attribute_list(binary_matrix)
attribute_list.head()
attribute_list.shape
uf.save_data(attribute_list, path, output_name + '_attribute_list',
ext='tsv', compression='gzip')
###Output
_____no_output_____
###Markdown
Create Gene and Attribute Set Libraries
###Code
uf.save_setlib(binary_matrix, 'gene', 'up', path, output_name + '_gene_up_set')
uf.save_setlib(binary_matrix, 'attribute', 'up', path,
output_name + '_attribute_up_set')
###Output
_____no_output_____
###Markdown
Create Attribute Similarity Matrix
###Code
attribute_similarity_matrix = uf.similarity_matrix(binary_matrix.T, 'jaccard', sparse=True)
attribute_similarity_matrix.head()
uf.save_data(attribute_similarity_matrix, path,
output_name + '_attribute_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
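# Illustration only: for two binary vectors, Jaccard similarity is
# |intersection| / |union|. This sketch assumes uf.similarity_matrix applies
# the same definition row-wise; it is not the library's actual code.
a = np.array([1, 0, 1, 1], dtype=bool)
b = np.array([1, 1, 0, 1], dtype=bool)
print((a & b).sum() / (a | b).sum())  # prints 0.5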
###Output
_____no_output_____
###Markdown
Create Gene Similarity Matrix
###Code
gene_similarity_matrix = uf.similarity_matrix(binary_matrix, 'jaccard', sparse=True)
gene_similarity_matrix.head()
uf.save_data(gene_similarity_matrix, path,
output_name + '_gene_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
###Output
_____no_output_____
###Markdown
Create Gene-Attribute Edge List
###Code
edge_list = uf.edge_list(binary_matrix)
uf.save_data(edge_list, path, output_name + '_edge_list',
ext='tsv', compression='gzip')
###Output
_____no_output_____
###Markdown
Create Downloadable Save File
###Code
uf.archive(path)
###Output
_____no_output_____
scikit-learn-official-examples/model_selection/plot_nested_cross_validation_iris.ipynb | ###Markdown
Nested versus non-nested cross-validation. This example compares non-nested and nested cross-validation strategies on a classifier of the iris data set. Nested cross-validation (CV) is often used to train a model in which hyperparameters also need to be optimized. Nested CV estimates the generalization error of the underlying model and its (hyper)parameter search. Choosing the parameters that maximize non-nested CV biases the model to the dataset, yielding an overly optimistic score. Model selection without nested CV uses the same data to tune model parameters and evaluate model performance. Information may thus "leak" into the model and overfit the data. The magnitude of this effect is primarily dependent on the size of the dataset and the stability of the model. See Cawley and Talbot [1] for an analysis of these issues. To avoid this problem, nested CV effectively uses a series of train/validation/test set splits. In the inner loop (here executed by `GridSearchCV`), the score is approximately maximized by fitting a model to each training set, and then directly maximized in selecting (hyper)parameters over the validation set. In the outer loop (here in `cross_val_score`), generalization error is estimated by averaging test set scores over several dataset splits. The example below uses a support vector classifier with a non-linear kernel to build a model with optimized hyperparameters by grid search. We compare the performance of non-nested and nested CV strategies by taking the difference between their scores. See also: `cross_validation`, `grid_search`. References: [1] Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and subsequent selection bias in performance evaluation. J. Mach. Learn. Res 2010, 11, 2079-2107.
###Code
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "LabelKFold", "LeaveOneOut", "LeaveOneLabelOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
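# (passing the GridSearchCV object as the estimator means every outer
# training fold is itself re-split by inner_cv to pick hyperparameters,
# so the outer test folds never influence the parameter search)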
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
###Output
_____no_output_____ |
notebooks/OTF Development Tests/2D OTF Development Notebook.ipynb | ###Markdown
2D OTF DevelopmentFor all types of OTFs and PSFs if we don't want to take into account vectorial effects we can calculate the 2D OTF or PSF _only_ and interpolate it out to a full 3D extent, if requested. This should offer huge speed gains by taking advantage of the symmetry of the problem.We should also be able to make use of real valued fft's (`rfftn` and `irfftn`) to speed up the computation even further.
###Code
%pylab inline
from pyotf.utils import *
x = linspace(-1,1,256)
xx, yy = meshgrid(x, x)
r = hypot(xx, yy)
coh_otf = logical_and(abs(r-0.5) < x[1]-x[0], yy > 0.3)
matshow(coh_otf)
coh_psf = easy_ifft(coh_otf)
iotf = easy_fft(abs(coh_psf) ** 2)
from dphplotting import slice_plot, mip
mip(log((iotf.real + np.finfo(iotf.dtype).resolution)))
###Output
_____no_output_____
###Markdown
2D OTF DevelopmentFor all types of OTFs and PSFs if we don't want to take into account vectorial effects we can calculate the 2D OTF or PSF _only_ and interpolate it out to a full 3D extent, if requested. This should offer huge speed gains by taking advantage of the symmetry of the problem.We should also be able to make use of real valued fft's (`rfftn` and `irfftn`) to speed up the computation even further.
###Code
%pylab inline
from pyotf.utils import *
x = linspace(-1,1,256)
xx, yy = meshgrid(x, x)
r = hypot(xx, yy)
coh_otf = logical_and(abs(r-0.5) < x[1]-x[0], yy > 0.3)
matshow(coh_otf)
coh_psf = easy_ifft(coh_otf)
iotf = easy_fft(abs(coh_psf) ** 2)
from dphtools.display import slice_plot, mip
mip(log((iotf.real + np.finfo(iotf.dtype).resolution)))
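# A minimal sketch of the real-FFT speedup mentioned above. Assumptions: we
# ignore the fftshift bookkeeping that easy_fft/easy_ifft handle, and np is
# available via %pylab. Because the intensity PSF is real, rfft2 stores only
# the non-redundant half of its spectrum, roughly halving compute and memory.
intensity_psf = abs(coh_psf) ** 2
half_iotf = np.fft.rfft2(np.fft.ifftshift(intensity_psf))
print(intensity_psf.shape, half_iotf.shape)  # (256, 256) vs (256, 129)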
###Output
_____no_output_____ |
Deep+Neural+Network+-+Application+v8.ipynb | ###Markdown
Deep Neural Network for Image Classification: Application. When you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course! You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation. **After this assignment you will be able to:**- Build and apply a deep neural network to supervised learning. Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
2 - Dataset You will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform better! **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labelled as cat (1) or non-cat (0) - a test set of m_test images labelled as cat and non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Let's get more familiar with the dataset. Load the data by running the cell below.
###Code
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
###Output
_____no_output_____
###Markdown
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
###Code
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
###Output
Number of training examples: 209
Number of testing examples: 50
Each image is of size: (64, 64, 3)
train_x_orig shape: (209, 64, 64, 3)
train_y shape: (1, 209)
test_x_orig shape: (50, 64, 64, 3)
test_y shape: (1, 50)
###Markdown
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below. Figure 1: Image to vector conversion.
###Code
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
###Output
train_x's shape: (12288, 209)
test_x's shape: (12288, 50)
###Markdown
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector. 3 - Architecture of your model Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.You will build two different models:- A 2-layer neural network- An L-layer deep neural networkYou will then compare the performance of these models, and also try out different values for $L$. Let's look at the two architectures. 3.1 - 2-layer neural network Figure 2: 2-layer neural network. The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. Detailed Architecture of figure 2:- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.- You then repeat the same process.- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias). - Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat. 3.2 - L-layer deep neural networkIt is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation: Figure 3: L-layer neural network. The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***Detailed Architecture of figure 3:- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat. 3.3 - General methodologyAs usual you will follow the Deep Learning methodology to build the model: 1. Initialize parameters / Define hyperparameters 2. Loop for num_iterations: a. Forward propagation b. Compute cost function c. Backward propagation d. Update parameters (using parameters, and grads from backprop) 4. Use trained parameters to predict labelsLet's now implement those two models! 4 - Two-layer neural network**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters(n_x, n_h, n_y): ... return parameters def linear_activation_forward(A_prev, W, b, activation): ... return A, cachedef compute_cost(AL, Y): ... return costdef linear_activation_backward(dA, cache, activation): ... return dA_prev, dW, dbdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, activation = 'relu')
A2, cache2 = linear_activation_forward(A1, W2, b2, activation = 'sigmoid')
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
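# (dA2 is the derivative of the cross-entropy cost with respect to A2:
# dL/dA2 = -(Y/A2 - (1-Y)/(1-A2)))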
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation = "sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation = "relu")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
###Output
Cost after iteration 0: 0.693049735659989
Cost after iteration 100: 0.6464320953428849
Cost after iteration 200: 0.6325140647912678
Cost after iteration 300: 0.6015024920354665
Cost after iteration 400: 0.5601966311605748
Cost after iteration 500: 0.515830477276473
Cost after iteration 600: 0.4754901313943325
Cost after iteration 700: 0.43391631512257495
Cost after iteration 800: 0.4007977536203886
Cost after iteration 900: 0.35807050113237987
Cost after iteration 1000: 0.3394281538366413
Cost after iteration 1100: 0.30527536361962654
Cost after iteration 1200: 0.2749137728213015
Cost after iteration 1300: 0.24681768210614827
Cost after iteration 1400: 0.1985073503746611
Cost after iteration 1500: 0.17448318112556593
Cost after iteration 1600: 0.1708076297809661
Cost after iteration 1700: 0.11306524562164737
Cost after iteration 1800: 0.09629426845937163
Cost after iteration 1900: 0.08342617959726878
Cost after iteration 2000: 0.0743907870431909
Cost after iteration 2100: 0.06630748132267938
Cost after iteration 2200: 0.05919329501038176
Cost after iteration 2300: 0.05336140348560564
Cost after iteration 2400: 0.048554785628770226
###Markdown
**Expected Output**: **Cost after iteration 0** 0.6930497356599888 **Cost after iteration 100** 0.6464320953428849 **...** ... **Cost after iteration 2400** 0.048554785628770206 Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
###Code
predictions_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 1.0
###Markdown
**Expected Output**: **Accuracy** 1.0
###Code
predictions_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.72
###Markdown
**Expected Output**: **Accuracy** 0.72 **Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting. Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model. 5 - L-layer Neural Network**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters_deep(layers_dims): ... return parameters def L_model_forward(X, parameters): ... return AL, cachesdef compute_cost(AL, Y): ... return costdef L_model_backward(AL, Y, caches): ... return gradsdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS ###
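# input layer of 12288 units (64 * 64 * 3 pixels), three hidden layers of
# 20, 7 and 5 units, and one sigmoid output unit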
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
You will now train the model as a 4-layer neural network. Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
###Output
Cost after iteration 0: 0.771749
Cost after iteration 100: 0.672053
Cost after iteration 200: 0.648263
Cost after iteration 300: 0.611507
Cost after iteration 400: 0.567047
Cost after iteration 500: 0.540138
Cost after iteration 600: 0.527930
Cost after iteration 700: 0.465477
Cost after iteration 800: 0.369126
Cost after iteration 900: 0.391747
Cost after iteration 1000: 0.315187
Cost after iteration 1100: 0.272700
Cost after iteration 1200: 0.237419
Cost after iteration 1300: 0.199601
Cost after iteration 1400: 0.189263
Cost after iteration 1500: 0.161189
Cost after iteration 1600: 0.148214
Cost after iteration 1700: 0.137775
Cost after iteration 1800: 0.129740
Cost after iteration 1900: 0.121225
Cost after iteration 2000: 0.113821
Cost after iteration 2100: 0.107839
Cost after iteration 2200: 0.102855
Cost after iteration 2300: 0.100897
Cost after iteration 2400: 0.092878
###Markdown
**Expected Output**: **Cost after iteration 0** 0.771749 **Cost after iteration 100** 0.672053 **...** ... **Cost after iteration 2400** 0.092878
###Code
pred_train = predict(train_x, train_y, parameters)
###Output
_____no_output_____
###Markdown
**Train Accuracy** 0.985645933014
###Code
pred_test = predict(test_x, test_y, parameters)
###Output
_____no_output_____
###Markdown
**Expected Output**: **Test Accuracy** 0.8 Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set. This is good performance for this task. Nice job! Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course). 6) Results AnalysisFirst, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
###Code
print_mislabeled_images(classes, test_x, test_y, pred_test)
###Output
_____no_output_____
###Markdown
**A few types of images the model tends to do poorly on include:**
- Cat body in an unusual position
- Cat appears against a background of a similar color
- Unusual cat color and species
- Camera angle
- Brightness of the picture
- Scale variation (cat is very large or small in image)

7) Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder.
3. Change your image's name in the following code.
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
# scipy's ndimage.imread and misc.imresize were removed in newer SciPy
# releases; PIL (already imported above) does the same job
image = np.array(Image.open(fname))
my_image = np.array(Image.fromarray(image).resize((num_px, num_px))).reshape((num_px*num_px*3, 1))
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
_____no_output_____
###Markdown
Deep Neural Network for Image Classification: Application. When you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course! You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation. **After this assignment you will be able to:**- Build and apply a deep neural network to supervised learning. Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
_____no_output_____
###Markdown
2 - Dataset You will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform better! **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labelled as cat (1) or non-cat (0) - a test set of m_test images labelled as cat and non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Let's get more familiar with the dataset. Load the data by running the cell below.
###Code
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
###Output
_____no_output_____
###Markdown
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
###Code
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
###Output
Number of training examples: 209
Number of testing examples: 50
Each image is of size: (64, 64, 3)
train_x_orig shape: (209, 64, 64, 3)
train_y shape: (1, 209)
test_x_orig shape: (50, 64, 64, 3)
test_y shape: (1, 50)
###Markdown
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below. Figure 1: Image to vector conversion.
###Code
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
###Output
train_x's shape: (12288, 209)
test_x's shape: (12288, 50)
###Markdown
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector. 3 - Architecture of your model Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.You will build two different models:- A 2-layer neural network- An L-layer deep neural networkYou will then compare the performance of these models, and also try out different values for $L$. Let's look at the two architectures. 3.1 - 2-layer neural network Figure 2: 2-layer neural network. The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. Detailed Architecture of figure 2:- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.- You then repeat the same process.- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias). - Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat. 3.2 - L-layer deep neural networkIt is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation: Figure 3: L-layer neural network. The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***Detailed Architecture of figure 3:- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat. 3.3 - General methodologyAs usual you will follow the Deep Learning methodology to build the model: 1. Initialize parameters / Define hyperparameters 2. Loop for num_iterations: a. Forward propagation b. Compute cost function c. Backward propagation d. Update parameters (using parameters, and grads from backprop) 4. Use trained parameters to predict labelsLet's now implement those two models! 4 - Two-layer neural network**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters(n_x, n_h, n_y): ... return parameters def linear_activation_forward(A_prev, W, b, activation): ... return A, cachedef compute_cost(AL, Y): ... return costdef linear_activation_backward(dA, cache, activation): ... return dA_prev, dW, dbdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
    Y -- true "label" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, activation="relu")
A2, cache2 = linear_activation_forward(A1, W2, b2, activation="sigmoid")
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
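        # (added note) dA2 is the per-example derivative of the cross-entropy cost
        # J = -(1/m) * sum(Y*log(A2) + (1-Y)*log(1-A2)) with respect to A2;
        # the 1/m factor is applied later, inside the linear backward helper.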
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation="sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation="relu")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check that the "Cost after iteration 0" matches the expected output below; if it does not, click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
###Output
Cost after iteration 0: 0.6930497356599888
Cost after iteration 100: 0.6464320953428849
Cost after iteration 200: 0.6325140647912677
Cost after iteration 300: 0.6015024920354665
Cost after iteration 400: 0.5601966311605748
Cost after iteration 500: 0.5158304772764729
Cost after iteration 600: 0.47549013139433255
Cost after iteration 700: 0.4339163151225749
Cost after iteration 800: 0.400797753620389
Cost after iteration 900: 0.3580705011323798
Cost after iteration 1000: 0.3394281538366412
Cost after iteration 1100: 0.3052753636196263
Cost after iteration 1200: 0.27491377282130197
Cost after iteration 1300: 0.24681768210614846
Cost after iteration 1400: 0.19850735037466088
Cost after iteration 1500: 0.17448318112556663
Cost after iteration 1600: 0.17080762978096237
Cost after iteration 1700: 0.11306524562164721
Cost after iteration 1800: 0.09629426845937147
Cost after iteration 1900: 0.08342617959726861
Cost after iteration 2000: 0.07439078704319078
Cost after iteration 2100: 0.06630748132267926
Cost after iteration 2200: 0.059193295010381654
Cost after iteration 2300: 0.05336140348560552
Cost after iteration 2400: 0.04855478562877016
###Markdown
**Expected Output**:

| **Cost after iteration 0** | 0.6930497356599888 |
|---|---|
| **Cost after iteration 100** | 0.6464320953428849 |
| **...** | ... |
| **Cost after iteration 2400** | 0.048554785628770206 |

Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.

Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
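predict comes from dnn_app_utils_v3 and is not reproduced in this notebook; as a rough sketch of the thresholding it performs (an assumption about its behavior, not the library's actual code):
```python
# Hedged sketch of predict's thresholding step -- not the actual dnn_app_utils_v3 code.
# Assumes a forward-pass helper (here L_model_forward) returning the final activations.
def predict_sketch(X, Y, parameters):
    AL, _ = L_model_forward(X, parameters)  # final sigmoid outputs, shape (1, m)
    p = (AL > 0.5).astype(int)              # probability > 0.5 -> classify as "cat" (1)
    print("Accuracy: " + str(np.mean(p == Y)))
    return p
```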
###Code
predictions_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 1.0
###Markdown
**Expected Output**: **Accuracy** 1.0
###Code
predictions_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.72
###Markdown
**Expected Output**: **Accuracy** 0.72

**Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping", and we will talk about it in the next course; a toy sketch follows below. Early stopping is a way to prevent overfitting.

Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.

5 - L-layer Neural Network

**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters_deep(layers_dims):
    ...
    return parameters
def L_model_forward(X, parameters):
    ...
    return AL, caches
def compute_cost(AL, Y):
    ...
    return cost
def L_model_backward(AL, Y, caches):
    ...
    return grads
def update_parameters(parameters, grads, learning_rate):
    ...
    return parameters
```
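As a toy illustration of the early-stopping idea from the note above (synthetic cost curve, not assignment code):
```python
# Toy early stopping on a synthetic cost that falls and then rises again.
best_cost, since_best, patience = float("inf"), 0, 3
for i in range(100):
    cost = (i - 30) ** 2 / 900 + 0.05    # pretend held-out cost, minimal at i = 30
    if cost < best_cost:
        best_cost, since_best = cost, 0  # improvement: remember it, reset the counter
    else:
        since_best += 1
    if since_best >= patience:           # no improvement for `patience` checks: stop
        print("early stop at iteration {}, best cost {:.4f}".format(i, best_cost))
        break
```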
###Code
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
    Implements an L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
    X -- data, numpy array of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
You will now train the model as a 4-layer neural network. Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check that the "Cost after iteration 0" matches the expected output below; if it does not, click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
###Output
Cost after iteration 0: 0.771749
Cost after iteration 100: 0.672053
Cost after iteration 200: 0.648263
Cost after iteration 300: 0.611507
Cost after iteration 400: 0.567047
Cost after iteration 500: 0.540138
Cost after iteration 600: 0.527930
Cost after iteration 700: 0.465477
Cost after iteration 800: 0.369126
Cost after iteration 900: 0.391747
Cost after iteration 1000: 0.315187
Cost after iteration 1100: 0.272700
Cost after iteration 1200: 0.237419
Cost after iteration 1300: 0.199601
Cost after iteration 1400: 0.189263
Cost after iteration 1500: 0.161189
Cost after iteration 1600: 0.148214
Cost after iteration 1700: 0.137775
Cost after iteration 1800: 0.129740
Cost after iteration 1900: 0.121225
Cost after iteration 2000: 0.113821
Cost after iteration 2100: 0.107839
Cost after iteration 2200: 0.102855
Cost after iteration 2300: 0.100897
Cost after iteration 2400: 0.092878
###Markdown
**Expected Output**:

| **Cost after iteration 0** | 0.771749 |
|---|---|
| **Cost after iteration 100** | 0.672053 |
| **...** | ... |
| **Cost after iteration 2400** | 0.092878 |
###Code
pred_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 0.985645933014
###Markdown
**Train Accuracy** 0.985645933014
###Code
pred_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.8
###Markdown
**Expected Output**: **Test Accuracy** 0.8

Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set. This is good performance for this task. Nice job! Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course).

6) Results Analysis

First, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
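A prediction counts as mislabeled when it disagrees with the true label; one way to sketch the index lookup that a helper like print_mislabeled_images relies on (using the pred_test and test_y arrays above):
```python
# Sketch: indices where prediction and label disagree (0+1 or 1+0 sums to 1).
import numpy as np

mislabeled = np.asarray(np.where(test_y + pred_test == 1))
print("mislabeled test indices:", mislabeled[1])
```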
###Code
print_mislabeled_images(classes, test_x, test_y, pred_test)
###Output
_____no_output_____
###Markdown
**A few types of images the model tends to do poorly on include:**
- Cat body in an unusual position
- Cat appears against a background of a similar color
- Unusual cat color and species
- Camera angle
- Brightness of the picture
- Scale variation (cat is very large or small in image)

7) Test with your own image (optional/ungraded exercise)

Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go to your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder.
3. Change your image's name in the following code.
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ##
my_image = "tiger.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
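# NOTE (added, not in the original assignment): ndimage.imread was removed in
# SciPy 1.2 and scipy.misc.imresize in SciPy 1.3. On newer SciPy, a PIL-based
# equivalent of the two lines above would be:
#   image = np.array(Image.open(fname))
#   my_image = np.array(Image.open(fname).resize((num_px, num_px))).reshape((num_px*num_px*3, 1))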
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
Accuracy: 0.0
y = 0.0, your L-layer model predicts a "non-cat" picture.
###Markdown
Deep Neural Network for Image Classification: ApplicationWhen you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course! You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation. **After this assignment you will be able to:**- Build and apply a deep neural network to supervised learning. Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
_____no_output_____
###Markdown
2 - DatasetYou will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform a better!**Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labelled as cat (1) or non-cat (0) - a test set of m_test images labelled as cat and non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).Let's get more familiar with the dataset. Load the data by running the cell below.
###Code
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
###Output
_____no_output_____
###Markdown
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
###Code
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
###Output
Number of training examples: 209
Number of testing examples: 50
Each image is of size: (64, 64, 3)
train_x_orig shape: (209, 64, 64, 3)
train_y shape: (1, 209)
test_x_orig shape: (50, 64, 64, 3)
test_y shape: (1, 50)
###Markdown
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below. Figure 1: Image to vector conversion.
###Code
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
###Output
train_x's shape: (12288, 209)
test_x's shape: (12288, 50)
###Markdown
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector. 3 - Architecture of your model Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.You will build two different models:- A 2-layer neural network- An L-layer deep neural networkYou will then compare the performance of these models, and also try out different values for $L$. Let's look at the two architectures. 3.1 - 2-layer neural network Figure 2: 2-layer neural network. The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. Detailed Architecture of figure 2:- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.- You then repeat the same process.- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias). - Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat. 3.2 - L-layer deep neural networkIt is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation: Figure 3: L-layer neural network. The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***Detailed Architecture of figure 3:- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat. 3.3 - General methodologyAs usual you will follow the Deep Learning methodology to build the model: 1. Initialize parameters / Define hyperparameters 2. Loop for num_iterations: a. Forward propagation b. Compute cost function c. Backward propagation d. Update parameters (using parameters, and grads from backprop) 4. Use trained parameters to predict labelsLet's now implement those two models! 4 - Two-layer neural network**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters(n_x, n_h, n_y): ... return parameters def linear_activation_forward(A_prev, W, b, activation): ... return A, cachedef compute_cost(AL, Y): ... return costdef linear_activation_backward(dA, cache, activation): ... return dA_prev, dW, dbdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line (of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, activation= "relu")
A2, cache2 = linear_activation_forward(A1, W2, b2, activation = "sigmoid")
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation = "sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation = "relu")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads,learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
###Output
Cost after iteration 0: 0.6930497356599888
Cost after iteration 100: 0.6464320953428849
Cost after iteration 200: 0.6325140647912677
Cost after iteration 300: 0.6015024920354665
Cost after iteration 400: 0.5601966311605747
Cost after iteration 500: 0.515830477276473
Cost after iteration 600: 0.4754901313943325
Cost after iteration 700: 0.4339163151225749
Cost after iteration 800: 0.4007977536203887
Cost after iteration 900: 0.3580705011323798
Cost after iteration 1000: 0.3394281538366412
Cost after iteration 1100: 0.3052753636196264
Cost after iteration 1200: 0.27491377282130164
Cost after iteration 1300: 0.24681768210614846
Cost after iteration 1400: 0.19850735037466116
Cost after iteration 1500: 0.1744831811255664
Cost after iteration 1600: 0.17080762978096148
Cost after iteration 1700: 0.11306524562164734
Cost after iteration 1800: 0.09629426845937152
Cost after iteration 1900: 0.08342617959726863
Cost after iteration 2000: 0.07439078704319081
Cost after iteration 2100: 0.0663074813226793
Cost after iteration 2200: 0.0591932950103817
Cost after iteration 2300: 0.053361403485605585
Cost after iteration 2400: 0.04855478562877016
###Markdown
**Expected Output**: **Cost after iteration 0** 0.6930497356599888 **Cost after iteration 100** 0.6464320953428849 **...** ... **Cost after iteration 2400** 0.048554785628770226 Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
###Code
predictions_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 1.0
###Markdown
**Expected Output**: **Accuracy** 1.0
###Code
predictions_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.72
###Markdown
**Expected Output**: **Accuracy** 0.72 **Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting. Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model. 5 - L-layer Neural Network**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters_deep(layers_dims): ... return parameters def L_model_forward(X, parameters): ... return AL, cachesdef compute_cost(AL, Y): ... return costdef L_model_backward(AL, Y, caches): ... return gradsdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
You will now train the model as a 4-layer neural network. Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
###Output
Cost after iteration 0: 0.771749
Cost after iteration 100: 0.672053
Cost after iteration 200: 0.648263
Cost after iteration 300: 0.611507
Cost after iteration 400: 0.567047
Cost after iteration 500: 0.540138
Cost after iteration 600: 0.527930
Cost after iteration 700: 0.465477
Cost after iteration 800: 0.369126
Cost after iteration 900: 0.391747
Cost after iteration 1000: 0.315187
Cost after iteration 1100: 0.272700
Cost after iteration 1200: 0.237419
Cost after iteration 1300: 0.199601
Cost after iteration 1400: 0.189263
Cost after iteration 1500: 0.161189
Cost after iteration 1600: 0.148214
Cost after iteration 1700: 0.137775
Cost after iteration 1800: 0.129740
Cost after iteration 1900: 0.121225
Cost after iteration 2000: 0.113821
Cost after iteration 2100: 0.107839
Cost after iteration 2200: 0.102855
Cost after iteration 2300: 0.100897
Cost after iteration 2400: 0.092878
###Markdown
**Expected Output**: **Cost after iteration 0** 0.771749 **Cost after iteration 100** 0.672053 **...** ... **Cost after iteration 2400** 0.092878
###Code
pred_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 0.985645933014
###Markdown
**Train Accuracy** 0.985645933014
###Code
pred_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.8
###Markdown
**Expected Output**: **Test Accuracy** 0.8 Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set. This is good performance for this task. Nice job! Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course). 6) Results AnalysisFirst, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
###Code
print_mislabeled_images(classes, test_x, test_y, pred_test)
###Output
_____no_output_____
###Markdown
**A few types of images the model tends to do poorly on include:** - Cat body in an unusual position- Cat appears against a background of a similar color- Unusual cat color and species- Camera Angle- Brightness of the picture- Scale variation (cat is very large or small in image) 7) Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ##
my_image = "caty.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
Accuracy: 1.0
y = 1.0, your L-layer model predicts a "cat" picture.
###Markdown
Deep Neural Network for Image Classification: ApplicationWhen you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course! You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation. **After this assignment you will be able to:**- Build and apply a deep neural network to supervised learning. Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
_____no_output_____
###Markdown
2 - DatasetYou will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform a better!**Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labelled as cat (1) or non-cat (0) - a test set of m_test images labelled as cat and non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).Let's get more familiar with the dataset. Load the data by running the cell below.
###Code
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
###Output
_____no_output_____
###Markdown
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
###Code
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
###Output
Number of training examples: 209
Number of testing examples: 50
Each image is of size: (64, 64, 3)
train_x_orig shape: (209, 64, 64, 3)
train_y shape: (1, 209)
test_x_orig shape: (50, 64, 64, 3)
test_y shape: (1, 50)
###Markdown
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below. Figure 1: Image to vector conversion.
###Code
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
###Output
train_x's shape: (12288, 209)
test_x's shape: (12288, 50)
###Markdown
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector. 3 - Architecture of your model Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.You will build two different models:- A 2-layer neural network- An L-layer deep neural networkYou will then compare the performance of these models, and also try out different values for $L$. Let's look at the two architectures. 3.1 - 2-layer neural network Figure 2: 2-layer neural network. The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. Detailed Architecture of figure 2:- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.- You then repeat the same process.- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias). - Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat. 3.2 - L-layer deep neural networkIt is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation: Figure 3: L-layer neural network. The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***Detailed Architecture of figure 3:- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat. 3.3 - General methodologyAs usual you will follow the Deep Learning methodology to build the model: 1. Initialize parameters / Define hyperparameters 2. Loop for num_iterations: a. Forward propagation b. Compute cost function c. Backward propagation d. Update parameters (using parameters, and grads from backprop) 4. Use trained parameters to predict labelsLet's now implement those two models! 4 - Two-layer neural network**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters(n_x, n_h, n_y): ... return parameters def linear_activation_forward(A_prev, W, b, activation): ... return A, cachedef compute_cost(AL, Y): ... return costdef linear_activation_backward(dA, cache, activation): ... return dA_prev, dW, dbdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, 'relu')
A2, cache2 = linear_activation_forward(A1, W2, b2, 'sigmoid')
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, 'sigmoid')
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, 'relu')
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters,grads,learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
###Output
Cost after iteration 0: 0.6930497356599888
Cost after iteration 100: 0.6464320953428849
Cost after iteration 200: 0.6325140647912677
Cost after iteration 300: 0.6015024920354665
Cost after iteration 400: 0.5601966311605747
Cost after iteration 500: 0.5158304772764729
Cost after iteration 600: 0.47549013139433255
Cost after iteration 700: 0.43391631512257495
Cost after iteration 800: 0.400797753620389
Cost after iteration 900: 0.3580705011323798
Cost after iteration 1000: 0.3394281538366411
Cost after iteration 1100: 0.3052753636196264
Cost after iteration 1200: 0.2749137728213018
Cost after iteration 1300: 0.24681768210614854
Cost after iteration 1400: 0.19850735037466094
Cost after iteration 1500: 0.17448318112556666
Cost after iteration 1600: 0.17080762978096128
Cost after iteration 1700: 0.11306524562164724
Cost after iteration 1800: 0.09629426845937152
Cost after iteration 1900: 0.08342617959726856
Cost after iteration 2000: 0.07439078704319078
Cost after iteration 2100: 0.06630748132267927
Cost after iteration 2200: 0.05919329501038164
Cost after iteration 2300: 0.05336140348560553
Cost after iteration 2400: 0.048554785628770115
###Markdown
**Expected Output**: **Cost after iteration 0** 0.6930497356599888 **Cost after iteration 100** 0.6464320953428849 **...** ... **Cost after iteration 2400** 0.048554785628770226 Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
###Code
predictions_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 1.0
###Markdown
**Expected Output**: **Accuracy** 1.0
###Code
predictions_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.72
###Markdown
**Expected Output**: **Accuracy** 0.72 **Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting. Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model. 5 - L-layer Neural Network**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters_deep(layers_dims): ... return parameters def L_model_forward(X, parameters): ... return AL, cachesdef compute_cost(AL, Y): ... return costdef L_model_backward(AL, Y, caches): ... return gradsdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL,Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
You will now train the model as a 4-layer neural network. Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
###Output
Cost after iteration 0: 0.771749
Cost after iteration 100: 0.672053
Cost after iteration 200: 0.648263
Cost after iteration 300: 0.611507
Cost after iteration 400: 0.567047
Cost after iteration 500: 0.540138
Cost after iteration 600: 0.527930
Cost after iteration 700: 0.465477
Cost after iteration 800: 0.369126
Cost after iteration 900: 0.391747
Cost after iteration 1000: 0.315187
Cost after iteration 1100: 0.272700
Cost after iteration 1200: 0.237419
Cost after iteration 1300: 0.199601
Cost after iteration 1400: 0.189263
Cost after iteration 1500: 0.161189
Cost after iteration 1600: 0.148214
Cost after iteration 1700: 0.137775
Cost after iteration 1800: 0.129740
Cost after iteration 1900: 0.121225
Cost after iteration 2000: 0.113821
Cost after iteration 2100: 0.107839
Cost after iteration 2200: 0.102855
Cost after iteration 2300: 0.100897
Cost after iteration 2400: 0.092878
###Markdown
**Expected Output**: **Cost after iteration 0** 0.771749 **Cost after iteration 100** 0.672053 **...** ... **Cost after iteration 2400** 0.092878
###Code
pred_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 0.985645933014
###Markdown
**Train Accuracy** 0.985645933014
###Code
pred_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.8
###Markdown
**Expected Output**: **Test Accuracy** 0.8 Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set. This is good performance for this task. Nice job! Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course). 6) Results AnalysisFirst, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
###Code
print_mislabeled_images(classes, test_x, test_y, pred_test)
###Output
_____no_output_____
###Markdown
**A few types of images the model tends to do poorly on include:** - Cat body in an unusual position- Cat appears against a background of a similar color- Unusual cat color and species- Camera Angle- Brightness of the picture- Scale variation (cat is very large or small in image) 7) Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
_____no_output_____
###Markdown
Deep Neural Network for Image Classification: ApplicationWhen you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course! You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation. **After this assignment you will be able to:**- Build and apply a deep neural network to supervised learning. Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
_____no_output_____
###Markdown
2 - DatasetYou will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform a better!**Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labelled as cat (1) or non-cat (0) - a test set of m_test images labelled as cat and non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).Let's get more familiar with the dataset. Load the data by running the cell below.
###Code
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
###Output
_____no_output_____
###Markdown
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
###Code
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
###Output
Number of training examples: 209
Number of testing examples: 50
Each image is of size: (64, 64, 3)
train_x_orig shape: (209, 64, 64, 3)
train_y shape: (1, 209)
test_x_orig shape: (50, 64, 64, 3)
test_y shape: (1, 50)
###Markdown
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below. Figure 1: Image to vector conversion.
###Code
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
###Output
train_x's shape: (12288, 209)
test_x's shape: (12288, 50)
###Markdown
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector. 3 - Architecture of your model Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.You will build two different models:- A 2-layer neural network- An L-layer deep neural networkYou will then compare the performance of these models, and also try out different values for $L$. Let's look at the two architectures. 3.1 - 2-layer neural network Figure 2: 2-layer neural network. The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. Detailed Architecture of figure 2:- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.- You then repeat the same process.- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias). - Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat. 3.2 - L-layer deep neural networkIt is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation: Figure 3: L-layer neural network. The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***Detailed Architecture of figure 3:- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat. 3.3 - General methodologyAs usual you will follow the Deep Learning methodology to build the model: 1. Initialize parameters / Define hyperparameters 2. Loop for num_iterations: a. Forward propagation b. Compute cost function c. Backward propagation d. Update parameters (using parameters, and grads from backprop) 4. Use trained parameters to predict labelsLet's now implement those two models! 4 - Two-layer neural network**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters(n_x, n_h, n_y): ... return parameters def linear_activation_forward(A_prev, W, b, activation): ... return A, cachedef compute_cost(AL, Y): ... return costdef linear_activation_backward(dA, cache, activation): ... return dA_prev, dW, dbdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, activation = "relu")
A2, cache2 = linear_activation_forward(A1, W2, b2, activation = "sigmoid")
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2,Y)
### END CODE HERE ###
# Initializing backward propagation
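        # dA2 is the derivative of the cross-entropy cost with respect to A2:
        # d/dA [-(Y*log(A) + (1-Y)*log(1-A))] = -(Y/A - (1-Y)/(1-A)), applied elementwise.
        # (The 1/m factor of the cost is applied inside the backward helpers, not here.)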
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation = "sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation = "relu")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
Run the cell below to train your parameters. See if your model runs: the cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check whether the "Cost after iteration 0" matches the expected output below; if not, click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
###Output
Cost after iteration 0: 0.6930497356599888
Cost after iteration 100: 0.6464320953428849
Cost after iteration 200: 0.6325140647912677
Cost after iteration 300: 0.6015024920354665
Cost after iteration 400: 0.5601966311605747
Cost after iteration 500: 0.5158304772764729
Cost after iteration 600: 0.47549013139433255
Cost after iteration 700: 0.43391631512257495
Cost after iteration 800: 0.400797753620389
Cost after iteration 900: 0.3580705011323798
Cost after iteration 1000: 0.3394281538366411
Cost after iteration 1100: 0.3052753636196264
Cost after iteration 1200: 0.2749137728213018
Cost after iteration 1300: 0.24681768210614854
Cost after iteration 1400: 0.19850735037466094
Cost after iteration 1500: 0.17448318112556666
Cost after iteration 1600: 0.17080762978096128
Cost after iteration 1700: 0.11306524562164724
Cost after iteration 1800: 0.09629426845937152
Cost after iteration 1900: 0.08342617959726856
Cost after iteration 2000: 0.07439078704319078
Cost after iteration 2100: 0.06630748132267927
Cost after iteration 2200: 0.05919329501038164
Cost after iteration 2300: 0.05336140348560553
Cost after iteration 2400: 0.048554785628770115
###Markdown
**Expected Output**:

| | |
|---|---|
| **Cost after iteration 0** | 0.6930497356599888 |
| **Cost after iteration 100** | 0.6464320953428849 |
| **...** | ... |
| **Cost after iteration 2400** | 0.048554785628770226 |

Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.

Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
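The `predict` helper comes from `dnn_app_utils_v3`; its exact code is provided with the assignment, but conceptually it is just a forward pass plus a 0.5 threshold. A minimal sketch of the idea (illustrative only, not the graded helper itself):
```python
def predict_sketch(X, y, parameters):
    """Illustration only: forward-propagate, threshold at 0.5, report accuracy."""
    AL, _ = L_model_forward(X, parameters)  # probabilities, shape (1, m)
    p = (AL > 0.5).astype(int)              # hard 0/1 predictions
    print("Accuracy: " + str(np.mean(p == y)))
    return p
```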
###Code
predictions_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 1.0
###Markdown
**Expected Output**: **Accuracy** 1.0
###Code
predictions_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.72
###Markdown
**Expected Output**:

| | |
|---|---|
| **Accuracy** | 0.72 |

**Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping", and we will talk about it in the next course. Early stopping is a way to prevent overfitting.

Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.

5 - L-layer Neural Network

**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
```python
def initialize_parameters_deep(layers_dims):
    ...
    return parameters

def L_model_forward(X, parameters):
    ...
    return AL, caches

def compute_cost(AL, Y):
    ...
    return cost

def L_model_backward(AL, Y, caches):
    ...
    return grads

def update_parameters(parameters, grads, learning_rate):
    ...
    return parameters
```
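As a quick orientation for the constants below: with `layers_dims = [12288, 20, 7, 5, 1]`, the standard convention is that $W^{[l]}$ has shape $(n^{[l]}, n^{[l-1]})$ and $b^{[l]}$ has shape $(n^{[l]}, 1)$, which you can enumerate directly (a small illustrative snippet, not part of the graded assignment):
```python
layers_dims = [12288, 20, 7, 5, 1]
for l in range(1, len(layers_dims)):
    print(f"W{l}: {(layers_dims[l], layers_dims[l - 1])}, b{l}: {(layers_dims[l], 1)}")
# W1: (20, 12288), b1: (20, 1) ... W4: (1, 5), b4: (1, 1)
```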
###Code
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
    Implements an L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X,parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL,Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL,Y,caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
You will now train the model as a 4-layer neural network. Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check whether the "Cost after iteration 0" matches the expected output below; if not, click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
###Output
Cost after iteration 0: 0.771749
Cost after iteration 100: 0.672053
Cost after iteration 200: 0.648263
Cost after iteration 300: 0.611507
Cost after iteration 400: 0.567047
Cost after iteration 500: 0.540138
Cost after iteration 600: 0.527930
Cost after iteration 700: 0.465477
Cost after iteration 800: 0.369126
Cost after iteration 900: 0.391747
Cost after iteration 1000: 0.315187
Cost after iteration 1100: 0.272700
Cost after iteration 1200: 0.237419
Cost after iteration 1300: 0.199601
Cost after iteration 1400: 0.189263
Cost after iteration 1500: 0.161189
Cost after iteration 1600: 0.148214
Cost after iteration 1700: 0.137775
Cost after iteration 1800: 0.129740
Cost after iteration 1900: 0.121225
Cost after iteration 2000: 0.113821
Cost after iteration 2100: 0.107839
Cost after iteration 2200: 0.102855
Cost after iteration 2300: 0.100897
Cost after iteration 2400: 0.092878
###Markdown
**Expected Output**:

| | |
|---|---|
| **Cost after iteration 0** | 0.771749 |
| **Cost after iteration 100** | 0.672053 |
| **...** | ... |
| **Cost after iteration 2400** | 0.092878 |
###Code
pred_train = predict(train_x, train_y, parameters)
###Output
Accuracy: 0.985645933014
###Markdown
**Train Accuracy** 0.985645933014
###Code
pred_test = predict(test_x, test_y, parameters)
###Output
Accuracy: 0.8
###Markdown
**Expected Output**:

| | |
|---|---|
| **Test Accuracy** | 0.8 |

Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set. This is good performance for this task. Nice job! In the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn there).

6) Results Analysis

First, let's take a look at some images the L-layer model labeled incorrectly. The cell below will show a few mislabeled images.
###Code
print_mislabeled_images(classes, test_x, test_y, pred_test)
###Output
_____no_output_____
###Markdown
**A few types of images the model tends to do poorly on include:**
- Cat body in an unusual position
- Cat appears against a background of a similar color
- Unusual cat color and species
- Camera angle
- Brightness of the picture
- Scale variation (cat is very large or small in image)

7) Test with your own image (optional/ungraded exercise)

Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder.
3. Change your image's name in the following code.
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ##
my_image = "selectionsort_pseudo.png" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
_____no_output_____
nodec_experiments/sirx/rl_td3_train.ipynb
###Markdown
SIRX: Training RL

Baseline comparison in terms of total loss and energy.

To run this script:
1. Please make sure that the required data folder is available at the paths used by the script. You may generate the required data by running the python script ```nodec_experiments/sirx/gen_parameters.py```.
2. The scripts below:
 - ```nodec_experiments/sirx/sirx.py```
 - ```nodec_experiments/sirx/rl_utils.py```
 - ```nodec_experiments/sirx/sirx_utils.py```
 contain very important utilities for running the training, evaluation, and plotting scripts. Please make sure that they are available in the python path when running experiments.

Reinforcement learning requires significant time to train. As neural network initialization is stochastic, please make sure that appropriate seeds are used, or expect some variance relative to the paper's results.

Imports
###Code
%load_ext autoreload
%autoreload 2
import os
import sys
sys.path.append("../../") # append modules from parent dir
import time
import copy
import numpy as np
import gym
from gym.spaces import Box
import torch
from torchdiffeq import odeint, odeint_adjoint
from nnc.controllers.neural_network.nnc_controllers import NNCDynamics
from nnc.helpers.torch_utils.graphs import drivers_to_tensor
from sirx import SIRDelta, neighborhood_mask, flat_to_channels, GCNNControl
from rl_utils import SIRXEnv, RLGCNN, Actor, Critic
import tianshou as ts
from tianshou.policy import TD3Policy
from tianshou.trainer import offpolicy_trainer
from tianshou.data import Collector, ReplayBuffer, to_torch
from tianshou.exploration import GaussianNoise
from torch.utils.tensorboard import SummaryWriter
device = 'cuda:0'
dtype = torch.float
###Output
_____no_output_____
###Markdown
Graph parameters
###Code
graph = 'lattice'
parameters_folder = '../../../data/parameters/sirx/'
results_folder = '../../../results/sirx/'+graph+'/'
graph_parameters_folder = parameters_folder + '/' + 'lattice' + '/'
adjacency_matrix = torch.load(graph_parameters_folder + 'adjacency.pt', map_location=device).to(dtype)
n_nodes = adjacency_matrix.shape[-1]
drivers = torch.load(graph_parameters_folder + 'drivers.pt', map_location='cpu').to(torch.long)
driver_matrix = drivers_to_tensor(n_nodes, drivers).to(dtype=dtype, device=device)
alpha = adjacency_matrix
beta = driver_matrix
side_size = int(np.sqrt(n_nodes))
###Output
_____no_output_____
###Markdown
Dynamics Parameters
###Code
x0 = torch.load(graph_parameters_folder + 'initial_state.pt').to(device=device, dtype=dtype)
target_subgraph = torch.load(graph_parameters_folder + 'target_subgraph_nodes.pt')
dynamics_params = torch.load(graph_parameters_folder + 'dynamics_parameters.pt')
# budget and rates need to be chosen according to graph size
budget = dynamics_params['budget']
infection_rate = dynamics_params['infection_rate']
recovery_rate = dynamics_params['recovery_rate']
total_time = 5 # determined via no control testing
sirx_dyn = SIRDelta(
adjacency_matrix=alpha,
infection_rate=infection_rate,
recovery_rate=recovery_rate,
driver_matrix=beta,
k_0=0.0,
).to(device=device, dtype=dtype)
rl_dt = 0.01 # RL interaction frequency
env_config={
'sirx' : sirx_dyn,
'target_nodes' : target_subgraph.tolist(),
'dt' : rl_dt,
'T' : total_time,
'ode_solve_method' : 'dopri5',
'reward_type' : 'sum_to_max',
'x0' : x0,
'budget' : budget
}
train_envs = ts.env.DummyVectorEnv([lambda: SIRXEnv(env_config) for _ in range(2)])
test_envs = ts.env.DummyVectorEnv([lambda: SIRXEnv(env_config) for _ in range(2)])
###Output
_____no_output_____
###Markdown
RL Neural Networks

If you check the code, you will see that it has the same learnable parameters and structure as the network used for NODEC, before the decision layer.
###Code
mask, ninds = neighborhood_mask(alpha)
in_preprocessor = lambda x: flat_to_channels(x, n_nodes=n_nodes, mask=mask, inds=ninds)
policy_net = RLGCNN(
adjacency_matrix = alpha,
driver_matrix = beta,
input_preprocessor = in_preprocessor,
in_channels=4,
feat_channels=5,
message_passes=4
)
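# Optional sanity check (added, not in the original script): the markdown above claims the
# policy network mirrors the NODEC GCNN, so its trainable parameter count should match it.
print("policy_net trainable parameters:",
      sum(p.numel() for p in policy_net.parameters() if p.requires_grad))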
actor = Actor(model = policy_net, device=device).to(device)
actor_optim = torch.optim.Adam(actor.parameters(), lr=0.0003)
critic1 = Critic(1, 4096, 512, device=device).to(device)
critic1_optim = torch.optim.Adam(critic1.parameters(), lr=1e-4)
critic2 = Critic(1, 4096, 512, device=device).to(device)
critic2_optim = torch.optim.Adam(critic2.parameters(), lr=1e-4)
# for transfer learning we can simply load a previously trained model:
#actor.model.load_state_dict(torch.load('../sir/sirx_best.torch'))
secs = int(round(time.time()))
log_path = results_folder + 'rl/td3/time_'+str(secs)
log_path
# Policy training procedure
# evaluation environment
env = SIRXEnv(env_config)
# You can change TD3 to SAC or any other continuous-action policy provided by tianshou
policy = TD3Policy(
actor = actor,
actor_optim = actor_optim,
critic1 = critic1,
critic1_optim = critic1_optim,
critic2 = critic2,
critic2_optim = critic2_optim,
tau= 0.005,
gamma = 0.999,
exploration_noise = GaussianNoise(0.01),
policy_noise = 0.001,
update_actor_freq = 5,
noise_clip = 0.5,
action_range = [env.action_space.low[0], env.action_space.high[0]],
reward_normalization = True,
ignore_done = False,
)
# Experience Collector
train_collector = Collector(
policy, train_envs, ReplayBuffer(8000))
test_collector = Collector(policy, test_envs)
writer = SummaryWriter(log_path)
def save_fn(policy):
# save best model
torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))
# trainer
result = offpolicy_trainer(
policy = policy,
train_collector = train_collector,
test_collector = test_collector,
max_epoch = 100,
step_per_epoch = len(env.time_steps),
collect_per_step = 1,
episode_per_test = 1,
batch_size = len(env.time_steps),
save_fn = save_fn,
writer = writer,
log_interval = 1,
verbose = True,
)
###Output
_____no_output_____ |
Training_Notebooks/Seq2point_kitchenoutlet.ipynb | ###Markdown
Test
###Code
#seq2point_test.py
#Tester class
import argparse
import os
import logging
import numpy as np
import keras
import pandas as pd
import tensorflow as tf
import time
from data_feeder_offset import TestSlidingWindowGenerator
from appliance_data import appliance_data, mains_data
import matplotlib.pyplot as plt
class Tester():
""" Used to test and evaluate a pre-trained seq2point model with or without pruning applied.
Parameters:
__appliance (string): The target appliance.
__algorithm (string): The (pruning) algorithm the model was trained with.
__network_type (string): The architecture of the model.
__crop (int): The maximum number of rows of data to evaluate the model with.
__batch_size (int): The number of rows per testing batch.
__window_size (int): The size of each sliding window.
__window_offset (int): The offset of the inferred value from the sliding window.
__test_directory (string): The directory of the test file for the model.
"""
def __init__(self, model, appliance, algorithm, crop, batch_size, network_type,
test_directory, saved_model_dir, log_file_dir, offset,
input_window_length):
self.model = model
self.__appliance = appliance
self.__algorithm = algorithm
self.__network_type = network_type
self.__crop = crop
self.__batch_size = batch_size
self.__input_window_length = input_window_length
self.__window_size = self.__input_window_length + 2
self.__window_offset = int(offset * self.__window_size - 1)
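# With the defaults used below (offset=0.3, input_window_length=21), window_size = 23,
# so window_offset = int(0.3 * 23 - 1) = 5, matching the offset printed during testing.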
self.__number_of_windows = 100
self.__test_directory = test_directory
self.__saved_model_dir = saved_model_dir
self.__log_file = log_file_dir
logging.basicConfig(filename=self.__log_file,level=logging.INFO)
def test_model(self):
""" Tests a fully-trained model using a sliding window generator as an input. Measures inference time, gathers, and
plots evaluationg metrics. """
test_input, test_target = self.load_dataset(self.__test_directory)
#model = create_model(self._input_window_length)
#model = load_model(model, self.__network_type, self.__algorithm,
# self.__appliance, self.__saved_model_dir)
model = self.model
test_generator = TestSlidingWindowGenerator(number_of_windows=self.__number_of_windows, inputs=test_input, targets=test_target, offset=self.__window_offset, windowlength = self.__input_window_length)
# Calculate the optimum steps per epoch.
steps_per_test_epoch = np.round(int(test_generator.total_size / self.__batch_size), decimals=0)
# Test the model.
start_time = time.time()
testing_history = model.predict(x=test_generator.load_dataset(), steps=steps_per_test_epoch, verbose=2)
end_time = time.time()
test_time = end_time - start_time
evaluation_metrics = model.evaluate(x=test_generator.load_dataset(), steps=steps_per_test_epoch)
self.log_results(model, test_time, evaluation_metrics)
self.plot_results(testing_history, test_input, test_target)
def load_dataset(self, directory):
"""Loads the testing dataset from the location specified by file_name.
Parameters:
directory (string): The location at which the dataset is stored, concatenated with the file name.
Returns:
test_input (numpy.array): The first n (crop) features of the test dataset.
test_target (numpy.array): The first n (crop) targets of the test dataset.
"""
data_frame = pd.read_csv(directory, nrows=self.__crop, skiprows=0, header=0)
test_input = np.round(np.array(data_frame.iloc[:, 0], float), 6)
test_target = np.round(np.array(data_frame.iloc[self.__window_offset: -self.__window_offset, 1], float), 6)
del data_frame
return test_input, test_target
def log_results(self, model, test_time, evaluation_metrics):
"""Logs the inference time, MAE and MSE of an evaluated model.
Parameters:
model (tf.keras.Model): The evaluated model.
test_time (float): The time taken by the model to infer all required values.
evaluation metrics (list): The MSE, MAE, and various compression ratios of the model.
"""
inference_log = "Inference Time: " + str(test_time)
logging.info(inference_log)
metric_string = "MSE: ", str(evaluation_metrics[0]), " MAE: ", str(evaluation_metrics[3])
logging.info(metric_string)
self.count_pruned_weights(model)
def count_pruned_weights(self, model):
""" Counts the total number of weights, pruned weights, and weights in convolutional
layers. Calculates the sparsity ratio of different layer types and logs these values.
Parameters:
model (tf.keras.Model): The evaluated model.
"""
num_total_zeros = 0
num_dense_zeros = 0
num_dense_weights = 0
num_conv_zeros = 0
num_conv_weights = 0
for layer in model.layers:
if np.shape(layer.get_weights())[0] != 0:
layer_weights = layer.get_weights()[0].flatten()
if "conv" in layer.name:
num_conv_weights += np.size(layer_weights)
num_conv_zeros += np.count_nonzero(layer_weights==0)
else:
num_dense_weights += np.size(layer_weights)
num_dense_zeros += np.count_nonzero(layer_weights==0)
conv_zeros_string = "CONV. ZEROS: " + str(num_conv_zeros)
conv_weights_string = "CONV. WEIGHTS: " + str(num_conv_weights)
conv_sparsity_ratio = "CONV. RATIO: " + str(num_conv_zeros / num_conv_weights)
dense_weights_string = "DENSE WEIGHTS: " + str(num_dense_weights)
dense_zeros_string = "DENSE ZEROS: " + str(num_dense_zeros)
dense_sparsity_ratio = "DENSE RATIO: " + str(num_dense_zeros / num_dense_weights)
total_zeros_string = "TOTAL ZEROS: " + str(num_total_zeros)
total_weights_string = "TOTAL WEIGHTS: " + str(model.count_params())
total_sparsity_ratio = "TOTAL RATIO: " + str(num_total_zeros / model.count_params())
print("LOGGING PATH: ", self.__log_file)
logging.info(conv_zeros_string)
logging.info(conv_weights_string)
logging.info(conv_sparsity_ratio)
logging.info("")
logging.info(dense_zeros_string)
logging.info(dense_weights_string)
logging.info(dense_sparsity_ratio)
logging.info("")
logging.info(total_zeros_string)
logging.info(total_weights_string)
logging.info(total_sparsity_ratio)
def plot_results(self, testing_history, test_input, test_target):
""" Generates and saves a plot of the testing history of the model against the (actual)
aggregate energy values and the true appliance values.
Parameters:
testing_history (numpy.ndarray): The series of values inferred by the model.
test_input (numpy.ndarray): The aggregate energy data.
test_target (numpy.ndarray): The true energy values of the appliance.
"""
testing_history = ((testing_history * 28.16) + 12.27)
test_target = ((test_target * 28.16) + 12.27)
test_agg = (test_input.flatten() * mains_data["std"]) + mains_data["mean"]
test_agg = test_agg[:testing_history.size]
# Can't have negative energy readings - set any results below 0 to 0.
test_target[test_target < 0] = 0
testing_history[testing_history < 0] = 0
test_input[test_input < 0] = 0
# Plot testing outcomes against ground truth.
plt.figure(1)
print(self.__window_offset)
print(test_agg.size)
print(test_target.size)
print(testing_history.size)
plt.plot(test_agg[self.__window_offset: -self.__window_offset], label="Aggregate")
plt.plot(test_target[:test_agg.size - (2 * self.__window_offset)], label="Ground Truth")
plt.plot(testing_history[:test_agg.size - (2 * self.__window_offset)], label="Predicted")
plt.title(self.__appliance + " " + self.__network_type + "(" + self.__algorithm + ")")
plt.ylabel("Power Value (Watts)")
plt.xlabel("Testing Window")
plt.legend()
x1 = 4500
x2 = 6000
plt.figure(2)
plt.plot(test_agg[self.__window_offset + x1: x2], label="Aggregate")
plt.plot(test_target[x1:x2], label="Ground Truth")
plt.plot(testing_history[x1:x2], label="Predicted")
x3 = 4900
x4 = 5500
plt.figure(3)
plt.plot(test_agg[self.__window_offset + x3: x4], label="Aggregate")
plt.plot(test_target[x3:x4], label="Ground Truth")
plt.plot(testing_history[x3:x4], label="Predicted")
#file_path = "./" + self.__appliance + "/saved_models/" + self.__appliance + "_" + self.__algorithm + "_" + self.__network_type + "_test_figure.png"
#plt.savefig(fname=file_path)
plt.show()
test_directory= "/content/drive/MyDrive/energy_disaggregation/seq2point-nilm/train_kitchenoutlet_stove/kitchenoutlet_test_.csv"
parser = argparse.ArgumentParser(description="Train a pruned neural network for energy disaggregation. ")
parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
parser.add_argument("--appliance_name", type=remove_space, default="kitchenoutlet", help="The name of the appliance to perform disaggregation with. Default is kettle. Available are: kettle, fridge, dishwasher, microwave. ")
parser.add_argument("--batch_size", type=int, default="2000", help="The batch size to use when training the network. Default is 1000. ")
parser.add_argument("--crop", type=int, default="10000", help="The number of rows of the dataset to take training data from. Default is 10000. ")
parser.add_argument("--algorithm", type=remove_space, default="seq2point", help="The pruning algorithm of the model to test. Default is none. ")
parser.add_argument("--network_type", type=remove_space, default="", help="The seq2point architecture to use. Only use if you do not want to use the standard architecture. Available are: default, dropout, reduced, and reduced_dropout. ")
parser.add_argument("--offset", type=float, default="0.3", help="Offset from window")
parser.add_argument("--input_window_length", type=int, default="21", help="Number of input data points to network. Default is 599. ")
parser.add_argument("--test_directory", type=str, default=test_directory, help="The dir for training data. ")
arguments = parser.parse_args()
# You need to provide the trained model
saved_model_dir = os.path.join(path, "saved_models/" + arguments.appliance_name + "_" + arguments.network_type + "_model.h5")
# The logs including results will be recorded to this log file
log_file_dir = os.path.join(path,"saved_models/" + arguments.appliance_name + "_" + arguments.algorithm + "_" + arguments.network_type + ".log")
df_test = pd.read_csv(test_directory)
len(df_test)
tester = Tester(model, arguments.appliance_name, arguments.algorithm, 190000,
arguments.batch_size, arguments.network_type,
arguments.test_directory, saved_model_dir, log_file_dir, arguments.offset,
arguments.input_window_length)
tester.test_model()
###Output
89/89 - 0s
89/89 [==============================] - 0s 2ms/step - loss: 5.5807 - mse: 5.5807 - msle: 0.0671 - mae: 0.5748
LOGGING PATH: /content/drive/MyDrive/energy_disaggregation/seq2point-nilm/saved_models/kitchenoutlet_seq2point_.log
5
8900
178014
8900
|
OOP/shirt_exercise.ipynb | ###Markdown
Use the Shirt ClassYou've seen what a class looks like and how to instantiate an object. Now it's your turn to write code that instantiates a shirt object. Explanation of the CodeThis Jupyter notebook is inside of a folder called 1.OOP_syntax_shirt_practice. You can see the folder if you click on the "Jupyter" logo above the notebook. Inside the folder are three files:- shirt_exercise.ipynb, which is the file you are currently looking at- answer.py containing answers to the exercise- tests.py, tests for checking your code - you can run these tests using the last code cell at the bottom of this notebook Your TaskThe shirt_exercise.ipynb file, which you are currently looking at if you are reading this, has an exercise to help guide you through coding with an object in Python.Fill out the TODOs in each section of the Jupyter notebook. You can find a solution in the answer.py file.First, run this code cell below to load the Shirt class.
###Code
class Shirt:
def __init__(self, shirt_color, shirt_size, shirt_style, shirt_price):
self.color = shirt_color
self.size = shirt_size
self.style = shirt_style
self.price = shirt_price
def change_price(self, new_price):
self.price = new_price
def discount(self, discount):
return self.price * (1 - discount)
### TODO:
# - instantiate a shirt object with the following characteristics:
# - color red, size S, style long-sleeve, and price 25
# - store the object in a variable called shirt_one
#
#
###
shirt_one = Shirt('red', 'S', 'long-sleeve', 25)
### TODO:
# - print the price of the shirt using the price attribute
# - use the change_price method to change the price of the shirt to 10
# - print the price of the shirt using the price attribute
# - use the discount method to print the price of the shirt with a 12% discount
#
###
print(shirt_one.price)
shirt_one.change_price(10)
print(shirt_one.price)
print(shirt_one.discount(.12))
### TODO:
#
# - instantiate another object with the following characteristics:
# - color orange, size L, style short-sleeve, and price 10
# - store the object in a variable called shirt_two
#
###
shirt_two = Shirt('orange', 'L', 'short-sleeve', 10)
### TODO:
#
# - calculate the total cost of shirt_one and shirt_two
# - store the results in a variable called total
#
###
total = shirt_one.price + shirt_two.price
### TODO:
#
# - use the shirt discount method to calculate the total cost if
# shirt_one has a discount of 14% and shirt_two has a discount
# of 6%
# - store the results in a variable called total_discount
###
total_discount = shirt_one.discount(.14) + shirt_two.discount(.06)
###Output
_____no_output_____
###Markdown
Test your CodeThe following code cell tests your code. There is a file called tests.py containing a function called run_tests(). The run_tests() function executes a handful of assert statements to check your work. You can see this file if you go to the Jupyter Notebook menu and click on "File->Open" and then open the tests.py file.Execute the next code cell. The code will produce an error if your answers in this exercise are not what was expected. Keep working on your code until all tests are passing.If you run the code cell and there is no output, then you passed all the tests!As mentioned previously, there's also a file with a solution. To find the solution, click on the Jupyter logo at the top of the workspace, and then enter the folder titled 1.OOP_syntax_shirt_practice
###Code
# Unit tests to check your solution
from tests import run_tests
run_tests(shirt_one, shirt_two, total, total_discount)
###Output
_____no_output_____ |
Neural Networks and Deep Learning/Week 4/Deep Neural Network Application_ Image Classification/.ipynb_checkpoints/Deep Neural Network - Application v8-checkpoint.ipynb | ###Markdown
Deep Neural Network for Image Classification: ApplicationWhen you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course! You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation. **After this assignment you will be able to:**- Build and apply a deep neural network to supervised learning. Let's get started! 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.- dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.- np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
###Code
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v3 import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
###Output
_____no_output_____
###Markdown
2 - DatasetYou will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform better!**Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labelled as cat (1) or non-cat (0) - a test set of m_test images labelled as cat and non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).Let's get more familiar with the dataset. Load the data by running the cell below.
###Code
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
###Output
_____no_output_____
###Markdown
The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
###Code
# Example of a picture
index = 10
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
###Output
Number of training examples: 209
Number of testing examples: 50
Each image is of size: (64, 64, 3)
train_x_orig shape: (209, 64, 64, 3)
train_y shape: (1, 209)
test_x_orig shape: (50, 64, 64, 3)
test_y shape: (1, 50)
###Markdown
As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below. Figure 1: Image to vector conversion.
###Code
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
###Output
train_x's shape: (12288, 209)
test_x's shape: (12288, 50)
###Markdown
$12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector. 3 - Architecture of your model Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.You will build two different models:- A 2-layer neural network- An L-layer deep neural networkYou will then compare the performance of these models, and also try out different values for $L$. Let's look at the two architectures. 3.1 - 2-layer neural network Figure 2: 2-layer neural network. The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. Detailed Architecture of figure 2:- The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$. - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.- You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.- You then repeat the same process.- You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias). - Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat. 3.2 - L-layer deep neural networkIt is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation: Figure 3: L-layer neural network. The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***Detailed Architecture of figure 3:- The input is a (64,64,3) image which is flattened to a vector of size (12288,1).- The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.- Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.- Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat. 3.3 - General methodologyAs usual you will follow the Deep Learning methodology to build the model: 1. Initialize parameters / Define hyperparameters 2. Loop for num_iterations: a. Forward propagation b. Compute cost function c. Backward propagation d. Update parameters (using parameters, and grads from backprop) 4. Use trained parameters to predict labelsLet's now implement those two models! 4 - Two-layer neural network**Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters(n_x, n_h, n_y): ... return parameters def linear_activation_forward(A_prev, W, b, activation): ... return A, cachedef compute_cost(AL, Y): ... return costdef linear_activation_backward(dA, cache, activation): ... return dA_prev, dW, dbdef update_parameters(parameters, grads, learning_rate): ... return parameters```
###Code
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1, W2, b2". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, activation="relu")
A2, cache2 = linear_activation_forward(A1, W2, b2, activation="sigmoid")
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, activation="sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, activation="relu")
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
###Output
_____no_output_____
###Markdown
**Expected Output**: **Cost after iteration 0** 0.6930497356599888 **Cost after iteration 100** 0.6464320953428849 **...** ... **Cost after iteration 2400** 0.048554785628770206 Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
###Code
predictions_train = predict(train_x, train_y, parameters)
###Output
_____no_output_____
###Markdown
**Expected Output**: **Accuracy** 1.0
###Code
predictions_test = predict(test_x, test_y, parameters)
###Output
_____no_output_____
###Markdown
**Expected Output**: **Accuracy** 0.72 **Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting. Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model. 5 - L-layer Neural Network**Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:```pythondef initialize_parameters_deep(layers_dims): ... return parameters def L_model_forward(X, parameters): ... return AL, cachesdef compute_cost(AL, Y): ... return costdef L_model_backward(AL, Y, caches): ... return gradsdef update_parameters(parameters, grads, learning_rate): ... return parameters```
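A minimal sketch of the early-stopping idea (the `val_costs` list below is hypothetical and simply stands in for per-epoch validation costs; it is not part of this assignment):
```python
val_costs = [0.71, 0.65, 0.62, 0.63, 0.64, 0.66]  # hypothetical per-epoch validation costs
best_cost, patience, wait = float("inf"), 3, 0
for epoch, cost in enumerate(val_costs):
    if cost < best_cost:
        best_cost, wait = cost, 0  # improvement: remember it and reset the patience counter
    else:
        wait += 1  # no improvement this epoch
        if wait >= patience:
            print("Stopping early at epoch", epoch)
            break
```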
###Code
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 4-layer model
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization. (≈ 1 line of code)
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
###Output
_____no_output_____
###Markdown
You will now train the model as a 4-layer neural network. Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
###Code
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
###Output
_____no_output_____
###Markdown
**Expected Output**: **Cost after iteration 0** 0.771749 **Cost after iteration 100** 0.672053 **...** ... **Cost after iteration 2400** 0.092878
###Code
pred_train = predict(train_x, train_y, parameters)
###Output
_____no_output_____
###Markdown
**Train Accuracy** 0.985645933014
###Code
pred_test = predict(test_x, test_y, parameters)
###Output
_____no_output_____
###Markdown
**Expected Output**: **Test Accuracy** 0.8 Congrats! It seems that your 4-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set. This is good performance for this task. Nice job! Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course). 6) Results AnalysisFirst, let's take a look at some images the L-layer model labeled incorrectly. This will show a few mislabeled images.
###Code
print_mislabeled_images(classes, test_x, test_y, pred_test)
###Output
_____no_output_____
###Markdown
**A few types of images the model tends to do poorly on include:** - Cat body in an unusual position- Cat appears against a background of a similar color- Unusual cat color and species- Camera Angle- Brightness of the picture- Scale variation (cat is very large or small in image) 7) Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
###Code
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_image = my_image/255.
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
###Output
_____no_output_____ |
lasagne/Lasagne_model_part1.ipynb | ###Markdown
Multilayer perceptron Here’s an example of my script in which I build a multilayer perceptron on some financial data. The data has 53 independent variables (X) and one dependent variable (y). It is time series data and X exhibits plenty of time-series related information. The goal is to fit the MLP as well as possible. We import the packages:
###Code
import numpy as np
import sys
import theano
import theano.tensor as T
import matplotlib.pyplot as plt ### Not needed at the moment, but can be useful to display plots.
import lasagne
from lasagne.nonlinearities import leaky_rectify, softmax, sigmoid, tanh
###Output
_____no_output_____
###Markdown
Read data in and print the amount of data we are dealing with:
###Code
input_data_all = np.load('train_sample.npy')
output_data_all= np.load('train_target.npy')
n_samples= input_data_all.shape[0]
print(n_samples)
###Output
96674
###Markdown
Shuffle two arrays together while preserving their correspondence - so we can shuffle the features and the target together.
###Code
def shuffle_together(input_1, input_2):
if input_1.shape[0]!= input_2.shape[0]:
print ("Problem, y and x array are not of the same shape.")
return None
c= np.arange(input_1.shape[0])
np.random.shuffle(c)
return input_1[c], input_2[c], c
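# A quick usage sketch on hypothetical toy arrays (commented out; not part of the pipeline):
# toy_x, toy_y = np.arange(6).reshape(3, 2), np.array([0, 1, 2])
# toy_x_s, toy_y_s, perm = shuffle_together(toy_x, toy_y)
# Row i of toy_x_s still matches entry i of toy_y_s, since both were indexed with perm.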
###Output
_____no_output_____
###Markdown
Create Lasagne layers given array of different numbers of dimensions, input to layers, x_variable and which nonlinearities to choose.
###Code
def create_layers(l_dim,my_input,x_variable,l_nonlinearities):
nr_layers = len(l_dim)
layers = [] ### Create an array where we will input layers, starting with input layer.
layers.append(lasagne.layers.InputLayer(shape=my_input.shape, input_var=x_variable))
if len(l_dim)==1:
return(layers)
else:
i = 1
while i<len(l_dim):
layers.append(lasagne.layers.DenseLayer(layers[i-1], num_units=l_dim[i],nonlinearity=l_nonlinearities[i]))
i += 1
return(layers)
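# A commented-out sketch of a toy call (hypothetical sizes; `x` is only defined further below):
# toy_net = create_layers(l_dim=[53, 10, 1], my_input=train_input, x_variable=x,
#                         l_nonlinearities=[None, tanh, None])
# toy_net[-1] would then be the output layer whose activations get_output reads.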
###Output
_____no_output_____
###Markdown
Shuffle input and output data and create Theano variables for the input and target minibatch:
###Code
input_data_all, output_data_all, _ = shuffle_together(input_data_all, output_data_all)
n_validation_samples= int(0.1*n_samples)
validation_input= input_data_all[-1*n_validation_samples:]
validation_output= output_data_all[-1*n_validation_samples:]
n_train_samples= n_samples-n_validation_samples
train_input= input_data_all[:n_train_samples]
train_output= output_data_all[:n_train_samples]
print(train_input.shape)
print(train_output.shape)
x=T.matrix('x', dtype= theano.config.floatX)
y=T.vector('y', dtype= theano.config.floatX)
###Output
(87007, 53)
(87007,)
###Markdown
Create layers. Here we set up how layers will be assembled. Also, we get the last layer output.
###Code
layers_dimensions = [input_data_all.shape[1],40,25,15,8,4,1]
### Above means that the first hidden layer will have 40 units, the second 25, and so on.
layers_nonlinearities = [None,lasagne.nonlinearities.rectify,lasagne.nonlinearities.tanh,
lasagne.nonlinearities.tanh,lasagne.nonlinearities.rectify,lasagne.nonlinearities.linear,None]
network = create_layers(l_dim = layers_dimensions,my_input=train_input,x_variable=x,l_nonlinearities=layers_nonlinearities)
output_layer = network[-1]
output_val = lasagne.layers.get_output(output_layer)
###Output
_____no_output_____
###Markdown
Create loss function. After that, define updates and how they will be made.
###Code
prediction = lasagne.layers.get_output(output_layer)
loss = lasagne.objectives.squared_error(prediction, y)
loss = lasagne.objectives.aggregate(loss, mode = 'mean')
params = lasagne.layers.get_all_params(output_layer, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0000001, momentum=0.9)
###Output
_____no_output_____
###Markdown
We define sample function (calculates output) and train_fn, which updates the parameters.
###Code
sample_function= theano.function(inputs= [x], outputs=[output_val])
train_fn = theano.function([x, y], loss, updates=updates)
###Output
WARNING (theano.gof.compilelock): Overriding existing lock by dead process '15748' (I am process '11284')
###Markdown
Example of usage of sample function:
###Code
[sample_out]= sample_function(validation_input)
sample_out
###Output
_____no_output_____
###Markdown
Quick evaluation of validation output without trained parameters (so, default params):
###Code
print(np.std(sample_out))
print(np.mean(sample_out))
###Output
0.1149251857400123
0.02464972597834169
###Markdown
Below is the main code. We choose the batch size and number of epochs and save the results in the r2_error array. We shuffle the training data before each epoch. In each epoch we then go through the shuffled data and train the model on each input/output batch. At the end of each epoch we evaluate how well the validation set performs and print out the performance. In this way, we follow progress on the validation output.
###Code
batch_size= 200
num_epochs = 50000
n_batches= train_input.shape[0]//batch_size
total_it= num_epochs*n_batches
if 'r2_error' in locals():
r2_error= np.concatenate([r2_error, np.zeros(total_it)], axis=0)
else:
r2_error= np.zeros(total_it)
previous_count=0
train_input, train_output, _= shuffle_together(train_input, train_output)
# train network (assuming you've got some training data in numpy arrays)
ss_tot = np.sum((validation_output-np.mean(validation_output))**2)
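# R^2 = 1 - SS_res / ss_tot, where ss_tot is the total variance of the validation targets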
for epoch in range(num_epochs):
train_loss = 0
for it in range(n_batches):
curr_it= n_batches*epoch+it +previous_count
input_batch= train_input[it*batch_size:(it+1)*batch_size,:]
output_batch= train_output[it*batch_size:(it+1)*batch_size]
train_loss += train_fn(input_batch, output_batch)
[sample_out]= sample_function(validation_input)
SS_res = np.sum((sample_out-validation_output)**2)
r2_error[curr_it]= 1 - SS_res/ss_tot
print("Epoch %d: Loss %g" % (epoch + 1, train_loss / len(input_data_all)))
print("iteration:",curr_it,"r2_error:",r2_error[curr_it])
train_input, train_output, _= shuffle_together(train_input, train_output)
###Output
Epoch 1: Loss 0.0113295
iteration: 434 r2_error: -574786.536903361
Epoch 2: Loss 0.0110627
iteration: 869 r2_error: -562417.8391131927
Epoch 3: Loss 0.0108323
iteration: 1304 r2_error: -551496.2178449756
Epoch 4: Loss 0.0106286
iteration: 1739 r2_error: -541618.8851601696
Epoch 5: Loss 0.010443
iteration: 2174 r2_error: -532529.351458485
Epoch 6: Loss 0.0102688
iteration: 2609 r2_error: -523914.6664194066
Epoch 7: Loss 0.0101021
iteration: 3044 r2_error: -515568.29449457163
Epoch 8: Loss 0.00993871
iteration: 3479 r2_error: -507299.2015407688
Epoch 9: Loss 0.00977594
iteration: 3914 r2_error: -499024.0634955908
Epoch 10: Loss 0.00961317
iteration: 4349 r2_error: -490679.59464380343
Epoch 11: Loss 0.00945015
iteration: 4784 r2_error: -482254.5404002292
Epoch 12: Loss 0.00928663
iteration: 5219 r2_error: -473860.0078922612
Epoch 13: Loss 0.00912343
iteration: 5654 r2_error: -465362.940379234
Epoch 14: Loss 0.008958
iteration: 6089 r2_error: -456633.608811546
Epoch 15: Loss 0.00878911
iteration: 6524 r2_error: -447687.23555651016
Epoch 16: Loss 0.00861428
iteration: 6959 r2_error: -438397.8653092252
Epoch 17: Loss 0.00843247
iteration: 7394 r2_error: -428631.15081965725
Epoch 18: Loss 0.00823904
iteration: 7829 r2_error: -418137.89109939977
Epoch 19: Loss 0.00802841
iteration: 8264 r2_error: -406422.6391345731
Epoch 20: Loss 0.00779033
iteration: 8699 r2_error: -393240.33461963356
Epoch 21: Loss 0.00751781
iteration: 9134 r2_error: -377866.09309058887
Epoch 22: Loss 0.0071898
iteration: 9569 r2_error: -359398.54031686846
Epoch 23: Loss 0.00681334
iteration: 10004 r2_error: -339913.5500439469
Epoch 24: Loss 0.00644261
iteration: 10439 r2_error: -321515.97231211734
Epoch 25: Loss 0.00610364
iteration: 10874 r2_error: -305074.0016951969
Epoch 26: Loss 0.00580289
iteration: 11309 r2_error: -290285.5245368572
Epoch 27: Loss 0.00553487
iteration: 11744 r2_error: -276990.70227416285
Epoch 28: Loss 0.00529421
iteration: 12179 r2_error: -265184.36308476055
Epoch 29: Loss 0.00507525
iteration: 12614 r2_error: -254440.22171959776
Epoch 30: Loss 0.00487377
iteration: 13049 r2_error: -244664.7326090627
Epoch 31: Loss 0.00468641
iteration: 13484 r2_error: -235472.47497638775
Epoch 32: Loss 0.00451021
iteration: 13919 r2_error: -226859.44281233096
Epoch 33: Loss 0.00434322
iteration: 14354 r2_error: -218633.0047711695
Epoch 34: Loss 0.00418416
iteration: 14789 r2_error: -210745.34760915872
Epoch 35: Loss 0.00403097
iteration: 15224 r2_error: -203063.92478993975
Epoch 36: Loss 0.00388035
iteration: 15659 r2_error: -195407.5093264816
Epoch 37: Loss 0.0037307
iteration: 16094 r2_error: -187867.70305048485
Epoch 38: Loss 0.0035829
iteration: 16529 r2_error: -180464.29533265467
Epoch 39: Loss 0.003437
iteration: 16964 r2_error: -173160.17664003716
Epoch 40: Loss 0.00329363
iteration: 17399 r2_error: -165975.42218118036
Epoch 41: Loss 0.0031557
iteration: 17834 r2_error: -159145.9809362754
Epoch 42: Loss 0.00302561
iteration: 18269 r2_error: -152729.56344029153
Epoch 43: Loss 0.00290362
iteration: 18704 r2_error: -146664.03572100116
Epoch 44: Loss 0.00278797
iteration: 19139 r2_error: -140875.71309447705
Epoch 45: Loss 0.00267692
iteration: 19574 r2_error: -135284.8415664269
Epoch 46: Loss 0.00256964
iteration: 20009 r2_error: -129836.54762190585
Epoch 47: Loss 0.00246418
iteration: 20444 r2_error: -124467.70296493567
Epoch 48: Loss 0.00235998
iteration: 20879 r2_error: -119149.64269823337
Epoch 49: Loss 0.0022563
iteration: 21314 r2_error: -113878.74985589225
Epoch 50: Loss 0.00215371
iteration: 21749 r2_error: -108694.96010119902
Epoch 51: Loss 0.00205421
iteration: 22184 r2_error: -103700.73085920552
Epoch 52: Loss 0.00195907
iteration: 22619 r2_error: -98995.28596320927
Epoch 53: Loss 0.0018705
iteration: 23054 r2_error: -94635.03679140715
Epoch 54: Loss 0.0017889
iteration: 23489 r2_error: -90611.24359644466
Epoch 55: Loss 0.00171369
iteration: 23924 r2_error: -86900.18585929488
Epoch 56: Loss 0.0016447
iteration: 24359 r2_error: -83475.34716592831
Epoch 57: Loss 0.00158093
iteration: 24794 r2_error: -80308.85803900007
Epoch 58: Loss 0.00152191
iteration: 25229 r2_error: -77363.80268121295
Epoch 59: Loss 0.00146689
iteration: 25664 r2_error: -74614.60975559622
Epoch 60: Loss 0.00141549
iteration: 26099 r2_error: -72036.70405597861
Epoch 61: Loss 0.0013672
iteration: 26534 r2_error: -69610.96471197171
Epoch 62: Loss 0.0013216
iteration: 26969 r2_error: -67321.63512073699
Epoch 63: Loss 0.00127856
iteration: 27404 r2_error: -65155.12553719169
Epoch 64: Loss 0.00123775
iteration: 27839 r2_error: -63095.47936068428
Epoch 65: Loss 0.00119887
iteration: 28274 r2_error: -61137.13927811659
Epoch 66: Loss 0.00116201
iteration: 28709 r2_error: -59271.8513544454
Epoch 67: Loss 0.00112676
iteration: 29144 r2_error: -57490.31146925303
Epoch 68: Loss 0.00109298
iteration: 29579 r2_error: -55789.43651670633
Epoch 69: Loss 0.00106083
iteration: 30014 r2_error: -54161.183889262255
Epoch 70: Loss 0.00102986
iteration: 30449 r2_error: -52599.59143360772
Epoch 71: Loss 0.00100028
iteration: 30884 r2_error: -51101.21096913881
Epoch 72: Loss 0.000971805
iteration: 31319 r2_error: -49661.561693533375
Epoch 73: Loss 0.000944487
iteration: 31754 r2_error: -48276.26613525843
Epoch 74: Loss 0.000918114
iteration: 32189 r2_error: -46944.30076707729
Epoch 75: Loss 0.000892772
iteration: 32624 r2_error: -45661.53347990869
Epoch 76: Loss 0.000868331
iteration: 33059 r2_error: -44426.20176105225
Epoch 77: Loss 0.000844768
iteration: 33494 r2_error: -43234.94060405042
Epoch 78: Loss 0.000822068
iteration: 33929 r2_error: -42085.93544424816
Epoch 79: Loss 0.000800182
iteration: 34364 r2_error: -40976.47590848422
Epoch 80: Loss 0.000778954
iteration: 34799 r2_error: -39904.74345925817
Epoch 81: Loss 0.000758432
iteration: 35234 r2_error: -38868.40313642433
Epoch 82: Loss 0.000738659
iteration: 35669 r2_error: -37865.47832051474
Epoch 83: Loss 0.000719494
iteration: 36104 r2_error: -36894.81241263211
Epoch 84: Loss 0.000700816
iteration: 36539 r2_error: -35954.34539440137
Epoch 85: Loss 0.000682884
iteration: 36974 r2_error: -35042.13321791968
Epoch 86: Loss 0.000665371
iteration: 37409 r2_error: -34157.83673882779
Epoch 87: Loss 0.000648429
iteration: 37844 r2_error: -33298.86263106809
Epoch 88: Loss 0.000631878
iteration: 38279 r2_error: -32464.078417016815
Epoch 89: Loss 0.000615895
iteration: 38714 r2_error: -31651.62300139748
Epoch 90: Loss 0.000600389
iteration: 39149 r2_error: -30860.114574483254
Epoch 91: Loss 0.000585162
iteration: 39584 r2_error: -30089.100634126473
Epoch 92: Loss 0.000570342
iteration: 40019 r2_error: -29336.2592918563
Epoch 93: Loss 0.000555834
iteration: 40454 r2_error: -28599.888163383876
Epoch 94: Loss 0.000541653
iteration: 40889 r2_error: -27881.10659716914
Epoch 95: Loss 0.000527839
iteration: 41324 r2_error: -27178.92488135057
Epoch 96: Loss 0.000514217
iteration: 41759 r2_error: -26491.611589022414
Epoch 97: Loss 0.00050096
iteration: 42194 r2_error: -25822.29027997921
Epoch 98: Loss 0.000488104
iteration: 42629 r2_error: -25169.45804427071
Epoch 99: Loss 0.000475534
iteration: 43064 r2_error: -24537.83593084585
Epoch 100: Loss 0.000463364
iteration: 43499 r2_error: -23931.13196196953
Epoch 101: Loss 0.000451716
iteration: 43934 r2_error: -23355.295195068862
Epoch 102: Loss 0.00044081
iteration: 44369 r2_error: -22817.0684673459
Epoch 103: Loss 0.000430668
iteration: 44804 r2_error: -22322.580355261314
Epoch 104: Loss 0.000421451
iteration: 45239 r2_error: -21876.839753778553
Epoch 105: Loss 0.000413221
iteration: 45674 r2_error: -21481.17038311552
Epoch 106: Loss 0.000405943
iteration: 46109 r2_error: -21133.301940782967
Epoch 107: Loss 0.000399557
iteration: 46544 r2_error: -20829.545783869853
Epoch 108: Loss 0.000394054
iteration: 46979 r2_error: -20565.26111507075
Epoch 109: Loss 0.000389306
iteration: 47414 r2_error: -20334.834361169665
Epoch 110: Loss 0.000385089
iteration: 47849 r2_error: -20132.34437459645
Epoch 111: Loss 0.000381437
iteration: 48284 r2_error: -19952.805500275474
Epoch 112: Loss 0.000378151
iteration: 48719 r2_error: -19792.066527804524
Epoch 113: Loss 0.00037521
iteration: 49154 r2_error: -19647.62260208262
Epoch 114: Loss 0.00037256
iteration: 49589 r2_error: -19516.58706046729
###Markdown
Example of the output and its mean, compared with the validation output and the validation mean.
###Code
sample_out
np.mean(sample_out)
validation_output
np.mean(validation_output)
output_layer.get_params()[0].get_value()
###Output
_____no_output_____
###Markdown
In the end, we load the test set and make predictions on it.
###Code
test_x = np.load('test_sample.npy')#,dtype=theano.config.floatX)
test_y= np.load('test_target.npy')#,dtype=theano.config.floatX)
###Output
_____no_output_____
###Markdown
We train linear and random forest model beore we save predictions of all of them on test set. Predictions can then be compared.
###Code
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(train_input, train_output)
linear_prediction = reg.predict(test_x)
from sklearn.ensemble import RandomForestRegressor
regr = RandomForestRegressor(n_estimators=250,criterion="mse")
regr.fit(train_input, train_output)
rf_prediction = regr.predict(test_x)
[sample_out]= sample_function(test_x)
import pandas as pd
export = pd.DataFrame(sample_out)
export['test_output'] = test_y
export['Random_forest'] = rf_prediction
export['Linear_prediction'] = linear_prediction
export.columns = ['sample_output', 'test_output','Random_forest','Linear_prediction']
export.to_csv("model_output2.csv")
###Output
_____no_output_____
###Markdown
And now we extract the parameter values from the layers and save them to disk. In this way, we have the whole model on disk and we can even train it further later on or simply use it at any time that is needed.
###Code
values = lasagne.layers.get_all_param_values(network)
values
import pickle as pkl
with open('model5.pkl','wb') as f:
pkl.dump(values, f)
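# A sketch of restoring the model later: rebuild the same architecture with create_layers,
# then push the pickled values back into it.
# with open('model5.pkl', 'rb') as f:
#     saved_values = pkl.load(f)
# lasagne.layers.set_all_param_values(network, saved_values)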
###Output
_____no_output_____ |
lab_01/exercises_lab01.ipynb | ###Markdown
Exercise 01 - "Python 101: Jupyter Notebooks and Python Basics"IEMBA 8/9 "Coding and Artificial Intelligence", University of St. Gallen The goal of the exercises below is to deepen your understanding of the Python language. In this Notebook, exercises are focused on our Python 101 Notebook. Feel free to use that Notebook as a reference, to search for help online, and to work together with others. If you have questions, please feel free to shout out!Please work on these exercises during the corresponding exercise session. We expect you to solve the **three mandatory exercises**, which are highlighted. All other exercises are optional and are not required to be solved! Basic Data Types **1. MANDATORY: Write a set of (or single) Python command(s) that compare the first and last character of a string.**> Write a set of Python commands that compare the first and last character of a string. In case both characters are the same the output should `True`, otherwise `False`. Test your statements one the strings `s1` and `s2` as defined below.
###Code
s1 = 'spooky'
s2 = 'sweets'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
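# One possible solution (a sketch; other equally valid approaches exist):
print(s1[0] == s1[-1])  # 'spooky': 's' vs 'y' -> False
print(s2[0] == s2[-1])  # 'sweets': 's' vs 's' -> True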
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that determine the properties of a string.**> Write a set of Python commands that determine the number of characters of a string and whether the characters are all upper case. If the number of characters is between 5 and 12 characters and all characters are upper case, output `True`, otherwise `False`. Test your statements on the strings `s1`, `s2`, and `s3` as defined below.
###Code
s1 = 'Cat'
s2 = 'RhinOzeRos'
s3 = 'PYRRHULOXIA'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that prints a very scary sentence.** (just imagine it's Halloween)> Write a set of Python commands that prints the scariest sentence that you could imagine. The sentence should include at least 3 of the following words 'tarantula', 'moonlight', 'supernatural', 'fog', 'owl', 'nightmare', or 'poltergeist'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Data Containers **1. MANDATORY: Write a set of (or single) Python command(s) that determine the number of characters of a list element.**> Write a set of Python commands that determine the number of characters of the second element of a list. In case the element consists of more than 4 characters output `True`, otherwise `False`. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['angel', 'nightmare', 'poltergeist']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
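# One possible solution (a sketch; other equally valid approaches exist):
print(len(l1[1]) > 4)  # 'nightmare' has 9 characters -> True
print(len(l2[1]) > 4)  # 'king' has only 4 characters -> False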
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that compares the elements of a list.**> Write a set of Python commands that compares the first and last elements of a list. In case both elements consist of the same characters output `True`, otherwise `False`. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['BROOMSTICK', 'ghostly', 'mYstEriOUs', 'BROOMSTICK']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that removes elements from a list.**> Write a set of Python commands to print a specified list after removing the 0th, 2nd, 3rd, and 5th element. Test your statements on list `l` as defined below.
###Code
l = ['BROOMSTICK', 'Happy', 'mYstEriOUs', 'BROOMSTICK', 'Halloween', 'Poltergeist']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Fundamental Programming Structures **1. MANDATORY: Write a Python loop that multiplies all elements of a list by 66.**> Write a Python loop that multiplies all elements of a list by `66`. The input list is given by `range(0, 10)` and its output should result in a list as denoted by: `[0, 66, 132, 198, 264, 330, 396, 462, 528, 594]`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
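# One possible solution (a sketch; a plain for-loop works just as well):
result = [element * 66 for element in range(0, 10)]
print(result)  # [0, 66, 132, 198, 264, 330, 396, 462, 528, 594]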
###Output
_____no_output_____
###Markdown
**2. Write a Python loop that prints the numbers 0 to 10 backwards.**> Write a Python loop that prints the numbers 0 to 10 backwards. The output of the loop should print the following: `10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a Python decision structure that prints all the numbers from 0 to 6 except 4 and 7.**> Write a Python decision structure that prints a number if it does not equal 4 or 7. If the number equals 4 or 7 it should print 'forbidden number'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Functions **1. Write a Python function to calculate the length of a string.**>Write a Python function named **"string_length"** to calculate the length of an arbitrary string. The function should take an arbitrary string as input and count the number of its characters. Test your function accordingly using various string values and print the results, e.g., input: 'Halloween', expected result: 9.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a Python program to get the largest number from a list.**>Write a Python function named **"max_num_in_list"** to get the largest number from a list. The function should take an arbitrary list of integer values as an input and should return the integer that corresponds to the highest value. Test your function accordingly using various string values and print the results, e.g., input: [1, 5, 8, 3], expected result: 8.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a Python program to count the number of characters (character frequency) in a string.**>Write a Python function named **"char_frequency"** to count the number of distinct characters occurring in it. The function should take an arbitrary string as an input and should return the count of occurrence each individual character. Test your function accordingly using various string values and print the results, e.g., input: 'Happy Halllllloweeeeeen!', expected result: {'a': 2, ' ': 1, 'e': 6, 'H': 2, 'l': 6, 'o': 1, 'n': 1, 'p': 2, '!': 1, 'w': 1, 'y': 1}.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Exercise 01 - "Python 101: Jupyter Notebooks and Python Basics"EMBA 60 W10 / EMBA 61 W5: Coding und Künstliche Intelligenz, University of St. Gallen The goal of the exercises below is to deepen your understanding of the Python language. In this Notebook, exercises are focused on our Python 101 Notebook. Feel free to use that Notebook as a reference, to search for help online, and to work together with others. If you have questions, please feel free to shout out!Please work on these exercises during the corresponding exercise session. We expect you to solve the **three mandatory exercises**, which are highlighted. All other exercises are optional and are not required to be solved! Basic Data Types **1. MANDATORY: Write a set of (or single) Python command(s) that compare the first and last character of a string.**> Write a set of Python commands that compare the first and last character of a string. In case both characters are the same the output should `True`, otherwise `False`. Test your statements one the strings `s1` and `s2` as defined below.
###Code
s1 = 'spooky'
s2 = 'sweets'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
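# One possible solution (hedged example):
print(s1[0] == s1[-1])  # 'spooky': 's' vs 'y' -> False
print(s2[0] == s2[-1])  # 'sweets': 's' vs 's' -> True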
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that determine the properties of a string.**> Write a set of Python commands that determine the number of characters of a string and whether the characters are all upper case. If the number of characters is between 5 and 12 and all characters are upper case, output `True`, otherwise `False`. Test your statements on the strings `s1`, `s2`, and `s3` as defined below.
###Code
s1 = 'Cat'
s2 = 'RhinOzeRos'
s3 = 'PYRRHULOXIA'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
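# One possible solution (hedged example):
for s in [s1, s2, s3]:
    print(5 <= len(s) <= 12 and s.isupper())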
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that prints a very scary sentence.** (just imagine it's Halloween)> Write a set of Python commands that prints the scariest sentence that you could imagine. The sentence should include at least 3 of the following words 'tarantula', 'moonlight', 'supernatural', 'fog', 'owl', 'nightmare', or 'poltergeist'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
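# One possible solution (hedged example):
print('A tarantula crept through the fog while an owl shrieked in the moonlight.')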
###Output
_____no_output_____
###Markdown
Data Containers **1. MANDATORY: Write a set of (or single) Python command(s) that determine the number of characters of a list element.**> Write a set of Python commands that determine the number of characters of the second element of a list. In case the element consists of more than 4 characters output `True`, otherwise `False`. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['angel', 'nightmare', 'poltergeist']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
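# One possible solution (hedged example):
print(len(l1[1]) > 4)  # 'nightmare' -> True
print(len(l2[1]) > 4)  # 'king' -> False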
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that compares the elements of a list.**> Write a set of Python commands that compares the first and last elements of a list. In case both elements consist of the same characters output `True`, otherwise `False`. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['BROOMSTICK', 'ghostly', 'mYstEriOUs', 'BROOMSTICK']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
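# One possible solution (hedged example):
print(l1[0] == l1[-1])  # 'BROOMSTICK' == 'BROOMSTICK' -> True
print(l2[0] == l2[-1])  # 'darkness' == 'owl' -> False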
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that removes elements from a list.**> Write a set of Python commands to print a specified list after removing the 0th, 2nd, 3rd, and 5th element. Test your statements on list `l` as defined below.
###Code
l = ['BROOMSTICK', 'Happy', 'mYstEriOUs', 'BROOMSTICK', 'Halloween', 'Poltergeist']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
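# One possible solution (hedged example):
print([e for i, e in enumerate(l) if i not in (0, 2, 3, 5)])  # ['Happy', 'Halloween']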
###Output
_____no_output_____
###Markdown
Fundamental Programming Structures **1. MANDATORY: Write a Python loop that multiplies all elements of a list by 66.**> Write a Python loop that multiplies all elements of a list by `66`. The input list is given by `range(0, 10)` and its output should result in a list as denoted by: `[0, 66, 132, 198, 264, 330, 396, 462, 528, 594]`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
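# One possible solution (hedged example):
result = []
for n in range(0, 10):
    result.append(n * 66)
print(result)  # expected: [0, 66, 132, 198, 264, 330, 396, 462, 528, 594]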
###Output
_____no_output_____
###Markdown
**2. Write a Python loop that prints the numbers 0 to 10 backwards.**> Write a Python loop that prints the numbers 0 to 10 backwards. The output of the loop should print the following: `10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
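# One possible solution (hedged example):
for n in range(10, -1, -1):
    print(n)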
###Output
_____no_output_____
###Markdown
**3. Write a Python decision structure that prints all the numbers from 0 to 6 except 4 and 7.**> Write a Python decision structure that prints a number if it does not equal 4 or 7. If the number equals 4 or 7, it should print 'forbidden number'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Functions **1. Write a Python function to calculate the length of a string.**>Write a Python function named **"string_length"** to calculate the length of an arbitrary string. The function should take an arbitrary string as input and count the number of its characters. Test your function accordingly using various string values and print the results, e.g., input: 'Halloween', expected result: 9.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a Python program to get the largest number from a list.**>Write a Python function named **"max_num_in_list"** to get the largest number from a list. The function should take an arbitrary list of integer values as an input and should return the integer that corresponds to the highest value. Test your function accordingly using various lists of integer values and print the results, e.g., input: [1, 5, 8, 3], expected result: 8.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a Python program to count the number of characters (character frequency) in a string.**>Write a Python function named **"char_frequency"** to count how often each distinct character occurs in a string. The function should take an arbitrary string as an input and should return the count of occurrences of each individual character. Test your function accordingly using various string values and print the results, e.g., input: 'Happy Halllllloweeeeeen!', expected result: {'a': 2, ' ': 1, 'e': 6, 'H': 2, 'l': 6, 'o': 1, 'n': 1, 'p': 2, '!': 1, 'w': 1, 'y': 1}.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Fundamentals of Python Programming: Data Types and ContainersIntroduction to AI and ML, University of St. Gallen, Fall Term 2021 The goal of the exercises below is to deepen your understanding of the Python language. In this Notebook, exercises are focused on our Python 101 Notebook. Feel free to use that Notebook as a reference, to search for help online, and to work together with others. If you have questions, please feel free to shout out! You don't have to work on these exercises in any specific order or finish them at all! In case you are wondering: yes, this Notebook is Halloween-themed. Basic Data Types **1. Write a set of (or single) Python command(s) that compare the first and last character of a string.**> Write a set of Python commands that compare the first and last character of a string. In case both characters are the same print 'True', otherwise print 'False'. Test your statements on the strings `s1` and `s2` as defined below.
###Code
s1 = 'spooky'
s2 = 'sweets'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that determine the properties of a string.**> Write a set of Python commands that determine the number of characters of a string and whether the characters are all upper case. If the number of characters is between 5 and 12 and all characters are upper case, print 'True', otherwise print 'False'. Test your statements on the strings `s1`, `s2`, and `s3` as defined below.
###Code
s1 = 'Cat'
s2 = 'RhinOzeRos'
s3 = 'PYRRHULOXIA'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that prints a very scary sentence.** (just imagine it's Halloween)> Write a set of Python commands that prints the scariest sentence that you could imagine. The sentence should include at least 3 of the following words 'tarantula', 'moonlight', 'supernatural', 'fog', 'owl', 'nightmare', or 'poltergeist'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Data Containers **1. Write a set of (or single) Python command(s) that determine the number of characters of a list element.**> Write a set of Python commands that determine the number of characters of the second element of a list. In case the element consists of more than 4 characters print 'True' otherwise print 'False'. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['angel', 'nightmare', 'poltergeist']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that compares the elements of a list.**> Write a set of Python commands that compares the first and last elements of a list. In case both elements consist of the same characters print 'True' otherwise print 'False'. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['BROOMSTICK', 'ghostly', 'mYstEriOUs', 'BROOMSTICK']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that removes elements from a list.**> Write a set of Python commands to print a specified list after removing the 0th, 2nd, 3rd, and 5th elements. Test your statements on list `l` as defined below.
###Code
l = ['BROOMSTICK', 'Happy', 'mYstEriOUs', 'BROOMSTICK', 'Halloween', 'Poltergeist']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Fundamental Programming Structures **1. Write a Python loop that multiplies all elements of a list by 66.**> Write a Python loop that multiplies all elements of a list by `66`. The input list is given by `range(0, 10)` and its output should result in a list as denoted by: `[0, 66, 132, 198, 264, 330, 396, 462, 528, 594]`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a Python loop that prints the numbers 0 to 10 backwards.**> Write a Python loop that prints the numbers 0 to 10 backwards. The output of the loop should print the following: `10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a Python decision structure that prints all the numbers from 0 to 6 except 4 and 7.**> Write a Python decision structure that prints a number if it does not equal 4 or 7. If the number equals 4 or 7, it should print 'forbidden number'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**4. Write a Python decision structure that evaluates if a number is a multiple of 5 and 7.**> Write a Python decision structure that evaluates if a number is a multiple of 5 and 7. Hint: You may want to use Python's modulo operator (`%`) as part of your case evaluation.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
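# One possible solution (hedged example; 35 is an arbitrary test value):
n = 35
if n % 5 == 0 and n % 7 == 0:
    print(n, 'is a multiple of 5 and 7')
else:
    print(n, 'is not a multiple of 5 and 7')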
###Output
_____no_output_____
###Markdown
Functions **1. Write a Python function to calculate the length of a string.**>Write a Python function named **"string_length"** to calculate the length of an arbitrary string. The function should take an arbitrary string as input and count the number of its characters. Test your function accordingly using various string values and print the results, e.g., input: 'Halloween', expected result: 9.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a Python program to get the largest number from a list.**>Write a Python function named **"max_num_in_list"** to get the largest number from a list. The function should take an arbitrary list of integer values as an input and should return the integer that corresponds to the highest value. Test your function accordingly using various lists of integer values and print the results, e.g., input: [1, 5, 8, 3], expected result: 8.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a Python program to count the number of characters (character frequency) in a string.**>Write a Python function named **"char_frequency"** to count how often each distinct character occurs in a string. The function should take an arbitrary string as an input and should return the count of occurrences of each individual character. Test your function accordingly using various string values and print the results, e.g., input: 'Happy Halllllloweeeeeen!', expected result: {'a': 2, ' ': 1, 'e': 6, 'H': 2, 'l': 6, 'o': 1, 'n': 1, 'p': 2, '!': 1, 'w': 1, 'y': 1}.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**4. Write a Python function that takes a list of words and returns the one exhibiting the most characters.**>Write a Python function named **find_longest_word** that takes a list of words and returns the longest word in the list. The function should take an arbitrary list of string values (words) as an input and should return the word that exhibits the most characters. Test your function accordingly using various lists of string values and print the results, e.g., input: ['Happy', 'Halloween', '2018'], expected result: 'Halloween'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
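# One possible solution (hedged example):
def find_longest_word(words):
    return max(words, key=len)

print(find_longest_word(['Happy', 'Halloween', '2018']))  # expected result: 'Halloween'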
###Output
_____no_output_____
###Markdown
Fundamentals of Python Programming: Data Types and ContainersIntroduction to ML and DL, University of St. Gallen, Fall Term 2021 The goal of the exercises below is to deepen your understanding of the Python language. In this Notebook, exercises are focused on our Python 101 Notebook. Feel free to use that Notebook as a reference, to search for help online, and to work together with others. If you have questions, please feel free to shout out! You don't have to work on these exercises in any specific order or finish them at all! In case you are wondering: yes, this Notebook is Halloween-themed. Basic Data Types **1. Write a set of (or single) Python command(s) that compare the first and last character of a string.**> Write a set of Python commands that compare the first and last character of a string. In case both characters are the same print 'True', otherwise print 'False'. Test your statements on the strings `s1` and `s2` as defined below.
###Code
s1 = 'spooky'
s2 = 'sweets'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that determine the properties of a string.**> Write a set of Python commands that determine the number of characters of a string and whether the characters are all upper case. If the number of characters is between 5 and 12 and all characters are upper case, print 'True', otherwise print 'False'. Test your statements on the strings `s1`, `s2`, and `s3` as defined below.
###Code
s1 = 'Cat'
s2 = 'RhinOzeRos'
s3 = 'PYRRHULOXIA'
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that prints a very scary sentence.** (just imagine it's Halloween)> Write a set of Python commands that prints the scariest sentence that you could imagine. The sentence should include at least 3 of the following words 'tarantula', 'moonlight', 'supernatural', 'fog', 'owl', 'nightmare', or 'poltergeist'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Data Containers **1. Write a set of (or single) Python command(s) that determine the number of characters of a list element.**> Write a set of Python commands that determine the number of characters of the second element of a list. In case the element consists of more than 4 characters print 'True' otherwise print 'False'. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['angel', 'nightmare', 'poltergeist']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a set of (or single) Python command(s) that compares the elements of a list.**> Write a set of Python commands that compares the first and last elements of a list. In case both elements consist of the same characters print 'True' otherwise print 'False'. Test your statements on lists `l1` and `l2` as defined below.
###Code
l1 = ['BROOMSTICK', 'ghostly', 'mYstEriOUs', 'BROOMSTICK']
l2 = ['darkness', 'king', 'fairy', 'owl']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a set of (or single) Python command(s) that removes elements from a list.**> Write a set of Python commands to print a specified list after removing the 0th, 2nd, 3rd, and 5th elements. Test your statements on list `l` as defined below.
###Code
l = ['BROOMSTICK', 'Happy', 'mYstEriOUs', 'BROOMSTICK', 'Halloween', 'Poltergeist']
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Fundamental Programming Structures **1. Write a Python loop that multiplies all elements of a list by 66.**> Write a Python loop that multiplies all elements of a list by `66`. The input list is given by `range(0, 10)` and its output should result in a list as denoted by: `[0, 66, 132, 198, 264, 330, 396, 462, 528, 594]`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a Python loop that prints the numbers 0 to 10 backwards.**> Write a Python loop that prints the numbers 0 to 10 backwards. The output of the loop should print the following: `10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0`.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a Python decision structure that prints all the numbers from 0 to 6 except 4 and 7.**> Write a Python decision structure that prints a number if it does not equal 4 or 7. If the number equals 4 or 7, it should print 'forbidden number'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**4. Write a Python decision structure that evaluates if a number is a multiple of 5 and 7.**> Write a Python decision structure that evaluates if a number is a multiple of 5 and 7. Hint: You may want to use Python's modulo operator (`%`) as part of your case evaluation.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
Functions **1. Write a Python function to calculate the length of a string.**>Write a Python function named **"string_length"** to calculate the length of an arbitrary string. The function should take an arbitrary string as input and count the number of its characters. Test your function accordingly using various string values and print the results, e.g., input: 'Halloween', expected result: 9.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**2. Write a Python program to get the largest number from a list.**>Write a Python function named **"max_num_in_list"** to get the largest number from a list. The function should take an arbitrary list of integer values as an input and should return the integer that corresponds to the highest value. Test your function accordingly using various lists of integer values and print the results, e.g., input: [1, 5, 8, 3], expected result: 8.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**3. Write a Python program to count the number of characters (character frequency) in a string.**>Write a Python function named **"char_frequency"** to count how often each distinct character occurs in a string. The function should take an arbitrary string as an input and should return the count of occurrences of each individual character. Test your function accordingly using various string values and print the results, e.g., input: 'Happy Halllllloweeeeeen!', expected result: {'a': 2, ' ': 1, 'e': 6, 'H': 2, 'l': 6, 'o': 1, 'n': 1, 'p': 2, '!': 1, 'w': 1, 'y': 1}.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____
###Markdown
**4. Write a Python function that takes a list of words and returns the one exhibiting the most characters.**>Write a Python function named **find_longest_word** that takes a list of words and returns the longest word in the list. The function should take an arbitrary list of string values (words) as an input and should return the word that exhibits the most characters. Test your function accordingly using various lists of string values and print the results, e.g., input: ['Happy', 'Halloween', '2018'], expected result: 'Halloween'.
###Code
# ***************************************************
# INSERT YOUR SOLUTION CODE HERE
# ***************************************************
###Output
_____no_output_____ |
tests_TitanicDataset.ipynb | ###Markdown
Required libraries. pyplot will help us display the test results, collected in arrays, as plots. SequentialDecisionTreeAlgorithm, in turn, is the file that contains the SequentialDecisionTreeAlgorithm class, the student's proposal for a sequential algorithm.
###Code
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Learning model based on decision trees
from lib.SequentialDecisionTreeAlgorithm import SequentialDecisionTreeAlgorithm
# Generalized to more models: decision trees or knn
from lib.SequentialModelAlgorithm import SequentialModelAlgorithm
# Column selection
dataset = pd.read_csv('./datasets/titanic.csv', header = 0)
attr_cols = dataset.loc[:, 'Pclass':'Is_Married']
obj_col = dataset['Survived']
###Output
_____no_output_____
###Markdown
1. Tests on the titanic dataset. We run the first test with default parameters that we will also use in the remaining tests. This gives us a fixed baseline so that we can compare the algorithm's results across different datasets.
###Code
print('---- titanic.csv ----')
print('Default hyperparameters: \n- ntree = 300\n- sample_size = 0.65\n- max_depth = 10\n- lr = 0.1')
SeqTree = SequentialDecisionTreeAlgorithm()
trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
print('Score: '+str(score))
###Output
---- titanic.csv ----
Default hyperparameters:
- ntree = 300
- sample_size = 0.65
- max_depth = 10
- lr = 0.1
Score: 0.9928279684916853
###Markdown
1.1 Varying the ntree hyperparameter. With this we increase or decrease the number of sequential trees across the different tests. We will keep the default parameters fixed and vary only the ntree parameter between 1 and 500.
###Code
values = [1, 50, 100, 150, 250, 350, 500]
print('\n--- Varying ntree ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
scores = []
for v in range(len(values)):
    SeqTree = SequentialDecisionTreeAlgorithm(ntree = values[v], sample_size = 0.65, max_depth = 10, lr = 0.1)
    trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
    scores.append(score)
# Plot how ntree affects the score
plt.plot(values, scores)
plt.xlabel('- nTrees -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the ntree hyperparameter while keeping the rest constant')
plt.show()
m = np.argmax(scores)
print('Best score: '+str(scores[m])+' with value = '+str(values[m]))
print('--------------------------')
print('\n')
###Output
--- Varying ntree [1 - 500] ---
###Markdown
We observe that the score reaches its maximum as we approach 50 trees. The curve is roughly logarithmic, so we do not expect to find better values however much we increase the number of trees. We can conclude that, at least for this example, the optimal choice would be to fix the algorithm at 50 trees, since it would return a similar result in a shorter run time. 1.2 Varying the sample_size hyperparameter. With this we increase or decrease the proportion of the sample size used by the meta-algorithm.
###Code
values = [0.1, 0.25, 0.5, 0.75, 0.9]
print('\n--- Varying sample_size ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
scores = []
for v in range(len(values)):
    SeqTree = SequentialDecisionTreeAlgorithm(ntree = 300, sample_size = values[v], max_depth = 10, lr = 0.1)
    trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
    scores.append(score)
# Plot how sample_size affects the score
plt.plot(values, scores)
plt.xlabel('- sample_size proportion -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the sample_size hyperparameter')
plt.show()
m = np.argmax(scores)
print('Best score: '+str(scores[m])+' with value = '+str(values[m]))
###Output
--- Varying sample_size [0.1 - 0.9] ---
###Markdown
Due to the randomness of the sample, this parameter does not show a direct effect. We also check by running the process 5 times and averaging the results obtained for each value (this can take quite a while):
###Code
values = [0.1, 0.25, 0.5, 0.75, 0.9]
print('\n--- Varying sample_size 5 times ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
tests = []
for j in range(5):
    for v in range(len(values)):
        SeqTree = SequentialDecisionTreeAlgorithm(ntree = 300, sample_size = values[v], max_depth = 10, lr = 0.1)
        trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
        if len(tests) <= v:
            tests.append([])
        tests[v].append(score)
print(np.mean(tests, axis=0))
# Plot how sample_size affects the averaged score
plt.plot(values, np.mean(tests, axis=0))
plt.xlabel('- sample_size proportion -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the sample_size hyperparameter')
plt.show()
m = np.argmax(np.mean(tests, axis=0))
print('Best score: '+str(np.mean(tests, axis=0)[m])+' with value = '+str(values[m]))
###Output
--- Varying sample_size 5 times [0.1 - 0.9] ---
[0.99084411 0.99206457 0.99139356 0.99050861 0.99130118]
###Markdown
1.3 Varying the max_depth hyperparameter
###Code
values = [1, 5, 10, 15, 20]
print('\n--- Varying max_depth ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
scores = []
for v in range(len(values)):
    SeqTree = SequentialDecisionTreeAlgorithm(ntree = 300, sample_size = 0.65, max_depth = values[v], lr = 0.1)
    trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
    scores.append(score)
# Plot how max_depth affects the score
plt.plot(values, scores)
plt.xlabel('- max_depth -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the max_depth hyperparameter')
plt.show()
m = np.argmax(scores)
print('Best score: '+str(scores[m])+' with value = '+str(values[m]))
###Output
--- Varying max_depth [1 - 20] ---
###Markdown
1.4 Varying the learning-rate hyperparameter lr
###Code
values = [0.1, 0.25, 0.5, 0.75, 0.9]
print('\n--- Varying lr ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
scores = []
for v in range(len(values)):
    SeqTree = SequentialDecisionTreeAlgorithm(ntree = 300, sample_size = 0.65, max_depth = 10, lr = values[v])
    trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
    scores.append(score)
# Plot how lr affects the score
plt.plot(values, scores)
plt.xlabel('- lr -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the learning-rate hyperparameter lr')
plt.show()
m = np.argmax(scores)
print('Best score: '+str(scores[m])+' with value = '+str(values[m]))
values = [0.1, 0.25, 0.5, 0.75, 0.9]
print('\n--- Varying lr 5 times ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
tests = []
for j in range(5):
    for v in range(len(values)):
        SeqTree = SequentialDecisionTreeAlgorithm(ntree = 300, sample_size = 0.65, max_depth = 10, lr = values[v])
        trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
        if len(tests) <= v:
            tests.append([])
        tests[v].append(score)
print(np.mean(tests, axis=0))
# Plot how lr affects the averaged score
plt.plot(values, np.mean(tests, axis=0))
plt.xlabel('- lr -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the lr hyperparameter')
plt.show()
m = np.argmax(np.mean(tests, axis=0))
print('Best score: '+str(np.mean(tests, axis=0)[m])+' with value = '+str(values[m]))
###Output
--- Varying lr 5 times [0.1 - 0.9] ---
[0.99050861 0.99084411 0.99206457 0.98995916 0.98995916]
###Markdown
Exploring and evaluating other hyperparameters. We will evaluate optional hyperparameters such as min_weight_fraction_leaf, min_samples_leaf, and max_features
###Code
values = [0.1, 0.2, 0.3, 0.4]
print('\n--- Varying min_weight_fraction_leaf ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
scores = []
for v in range(len(values)):
    SeqTree = SequentialDecisionTreeAlgorithm(ntree = 300, sample_size = 0.65, max_depth = 10, lr = 0.1, min_weight_fraction_leaf=values[v])
    trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
    scores.append(score)
# Plot how min_weight_fraction_leaf affects the score
plt.plot(values, scores)
plt.xlabel('- min_weight_fraction_leaf -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the min_weight_fraction_leaf hyperparameter')
plt.show()
m = np.argmax(scores)
print('Best score: '+str(scores[m])+' with value = '+str(values[m]))
###Output
--- Varying min_weight_fraction_leaf [0.1 - 0.4] ---
###Markdown
2. Example with the best configuration. We fit with: - ntree = 55 - sample_size = 0.65 - max_depth = 5 - lr = 0.35
###Code
SeqTree = SequentialDecisionTreeAlgorithm(ntree = 55, sample_size = 0.65, max_depth = 5, lr = 0.35)
trees, score = SeqTree.start(attributes_cols = attr_cols, objetive_col = obj_col)
print('Final score: '+str(score))
###Output
Final score: 0.9945054945054945
###Markdown
3. Example with knn. Varying nmodels
###Code
values = [1, 5, 10, 15, 20, 30, 50, 100, 175, 250, 350]
print('\n--- Varying nmodels ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
scores = []
for v in range(len(values)):
    SeqModel = SequentialModelAlgorithm(nmodels = values[v], method = "knn")
    models, score = SeqModel.start(attributes_cols = attr_cols, objetive_col = obj_col)
    scores.append(score)
# Plot how nmodels affects the score
plt.plot(values, scores)
plt.xlabel('- nmodels -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the nmodels hyperparameter')
plt.show()
m = np.argmax(scores)
print('Best score: '+str(scores[m])+' with value = '+str(values[m]))
###Output
--- Varying nmodels [1 - 350] ---
###Markdown
Varying n_neighbors
###Code
values = [1, 2, 3, 4, 5]
print('\n--- Varying n_neighbors ['+str(np.min(values))+' - '+str(np.max(values))+'] ---')
scores = []
for v in range(len(values)):
    SeqModel = SequentialModelAlgorithm(neighbors = values[v], nmodels=10, method = "knn")
    models, score = SeqModel.start(attributes_cols = attr_cols, objetive_col = obj_col)
    scores.append(score)
# Plot how n_neighbors affects the score
plt.plot(values, scores)
plt.xlabel('- n_neighbors -')
plt.ylabel('- Balanced Accuracy Score -')
plt.title('Varying the n_neighbors hyperparameter')
plt.show()
m = np.argmax(scores)
print('Best score: '+str(scores[m])+' with value = '+str(values[m]))
###Output
--- Varying n_neighbors [1 - 5] ---
|
boards/Pynq-Z1/base/notebooks/pmod/pmod_grove_tmp.ipynb | ###Markdown
Grove Temperature Sensor 1.2This example shows how to use the [Grove Temperature Sensor v1.2](http://wiki.seeedstudio.com/Grove-Temperature_Sensor_V1.2/). You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC. A Grove Temperature sensor and Pynq Grove Adapter, or Pynq Shield is required. The Grove Temperature Sensor, Pynq Grove Adapter, and Grove I2C ADC are used for this example. You can read a single value of temperature or read multiple values at regular intervals for a desired duration.At the end of this notebook, a Python only solution with single-sample read functionality is provided. 1. Load overlay
###Code
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
###Output
_____no_output_____
###Markdown
2. Read single temperatureThis example shows how to get a single temperature sample from the Grove TMP sensor. The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged into the 1st PMOD labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC. Grove ADC provides a raw sample, which is converted into resistance first and then converted into temperature.
###Code
import math
from pynq.lib.pmod import Grove_TMP
from pynq.lib.pmod import PMOD_GROVE_G4
tmp = Grove_TMP(base.PMODB,PMOD_GROVE_G4)
temperature = tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
28.2 degree Celsius
###Markdown
3. Start logging once every 100ms for 10 secondsExecuting the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touching/holding the temperature sensor to vary the measured temperature. You can vary the logging interval and the duration by changing the values 100 and 10 in the cell below. The raw samples are stored in the internal memory, and converted into temperature values.
###Code
import time
%matplotlib inline
import matplotlib.pyplot as plt
tmp.set_log_interval_ms(100)
tmp.start_log()
# Change input during this time
time.sleep(10)
tmp_log = tmp.get_log()
plt.plot(range(len(tmp_log)), tmp_log, 'ro')
plt.title('Grove Temperature Plot')
min_tmp_log = min(tmp_log)
max_tmp_log = max(tmp_log)
plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log])
plt.show()
###Output
_____no_output_____
###Markdown
4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IICThis class is ported from http://wiki.seeedstudio.com/Grove-Temperature_Sensor/
###Code
from time import sleep
from math import log
from pynq.lib.pmod import PMOD_GROVE_G3
from pynq.lib.pmod import PMOD_GROVE_G4
from pynq.lib import Pmod_IIC
class Python_Grove_TMP(Pmod_IIC):
"""This class controls the grove temperature sensor.
This class inherits from the PMODIIC class.
Attributes
----------
iop : _IOP
The _IOP object returned from the DevMode.
scl_pin : int
The SCL pin number.
sda_pin : int
The SDA pin number.
iic_addr : int
The IIC device address.
"""
def __init__(self, pmod_id, gr_pins, model = 'v1.2'):
"""Return a new instance of a grove OLED object.
Parameters
----------
pmod_id : int
The PMOD ID (1, 2) corresponding to (PMODA, PMODB).
gr_pins: list
The group pins on Grove Adapter. G3 or G4 is valid.
model : string
Temperature sensor model (can be found on the device).
"""
if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]:
[scl_pin,sda_pin] = gr_pins
else:
raise ValueError("Valid group numbers are G3 and G4.")
# Each revision has its own B value
if model == 'v1.2':
# v1.2 uses thermistor NCP18WF104F03RC
self.bValue = 4250
elif model == 'v1.1':
# v1.1 uses thermistor NCP18WF104F03RC
self.bValue = 4250
else:
# v1.0 uses thermistor TTC3A103*39H
self.bValue = 3975
super().__init__(pmod_id, scl_pin, sda_pin, 0x50)
# Initialize the Grove ADC
self.send([0x2,0x20]);
def read(self):
"""Read temperature in Celsius from grove temperature sensor.
Parameters
----------
None
Returns
-------
float
Temperature reading in Celsius.
"""
val = self._read_grove_adc()
R = 4095.0/val - 1.0
temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15
return temp
def _read_grove_adc(self):
self.send([0])
bytes = self.receive(2)
return 2*(((bytes[0] & 0x0f) << 8) | bytes[1])
from pynq import PL
# Flush IOP state
PL.reset()
py_tmp = Python_Grove_TMP(base.PMODB, PMOD_GROVE_G4)
temperature = py_tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
30.54 degree Celsius
###Markdown
Grove Temperature Sensor 1.2This example shows how to use the [Grove Temperature Sensor v1.2](http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor_V1.2) on the Pynq-Z1 board. You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC. A [Grove Temperature sensor](http://www.seeedstudio.com/depot/grove-led-bar-p-1178.html) and Pynq Grove Adapter, or Pynq Shield is required. The Grove Temperature Sensor, Pynq Grove Adapter, and Grove I2C ADC are used for this example. You can read a single value of temperature or read multiple values at regular intervals for a desired duration.At the end of this notebook, a Python only solution with single-sample read functionality is provided. 1. Load overlay
###Code
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
###Output
_____no_output_____
###Markdown
2. Read single temperatureThis example shows how to get a single temperature sample from the Grove TMP sensor. The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged into the 1st PMOD labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC. Grove ADC provides a raw sample, which is converted into resistance first and then converted into temperature.
###Code
import math
from pynq.lib.pmod import Grove_TMP
from pynq.lib.pmod import PMOD_GROVE_G4
tmp = Grove_TMP(base.PMODB,PMOD_GROVE_G4)
temperature = tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
28.2 degree Celsius
###Markdown
3. Start logging once every 100ms for 10 secondsExecuting the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touching/holding the temperature sensor to vary the measured temperature. You can vary the logging interval and the duration by changing the values 100 and 10 in the cell below. The raw samples are stored in the internal memory, and converted into temperature values.
###Code
import time
%matplotlib inline
import matplotlib.pyplot as plt
tmp.set_log_interval_ms(100)
tmp.start_log()
# Change input during this time
time.sleep(10)
tmp_log = tmp.get_log()
plt.plot(range(len(tmp_log)), tmp_log, 'ro')
plt.title('Grove Temperature Plot')
min_tmp_log = min(tmp_log)
max_tmp_log = max(tmp_log)
plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log])
plt.show()
###Output
_____no_output_____
###Markdown
4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IICThis class is ported from http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor.
###Code
from time import sleep
from math import log
from pynq.lib.pmod import PMOD_GROVE_G3
from pynq.lib.pmod import PMOD_GROVE_G4
from pynq.lib import Pmod_IIC
class Python_Grove_TMP(Pmod_IIC):
"""This class controls the grove temperature sensor.
This class inherits from the PMODIIC class.
Attributes
----------
iop : _IOP
The _IOP object returned from the DevMode.
scl_pin : int
The SCL pin number.
sda_pin : int
The SDA pin number.
iic_addr : int
The IIC device address.
"""
def __init__(self, pmod_id, gr_pins, model = 'v1.2'):
"""Return a new instance of a grove OLED object.
Parameters
----------
pmod_id : int
The PMOD ID (1, 2) corresponding to (PMODA, PMODB).
gr_pins: list
The group pins on Grove Adapter. G3 or G4 is valid.
model : string
Temperature sensor model (can be found on the device).
"""
if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]:
[scl_pin,sda_pin] = gr_pins
else:
raise ValueError("Valid group numbers are G3 and G4.")
# Each revision has its own B value
if model == 'v1.2':
# v1.2 uses thermistor NCP18WF104F03RC
self.bValue = 4250
elif model == 'v1.1':
# v1.1 uses thermistor NCP18WF104F03RC
self.bValue = 4250
else:
# v1.0 uses thermistor TTC3A103*39H
self.bValue = 3975
super().__init__(pmod_id, scl_pin, sda_pin, 0x50)
# Initialize the Grove ADC
self.send([0x2,0x20]);
def read(self):
"""Read temperature in Celsius from grove temperature sensor.
Parameters
----------
None
Returns
-------
float
Temperature reading in Celsius.
"""
val = self._read_grove_adc()
R = 4095.0/val - 1.0
temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15
return temp
def _read_grove_adc(self):
self.send([0])
bytes = self.receive(2)
return 2*(((bytes[0] & 0x0f) << 8) | bytes[1])
from pynq import PL
# Flush IOP state
PL.reset()
py_tmp = Python_Grove_TMP(base.PMODB, PMOD_GROVE_G4)
temperature = py_tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
30.54 degree Celsius
###Markdown
Grove Temperature Sensor 1.2This example shows how to use the [Grove Temperature Sensor v1.2](http://wiki.seeedstudio.com/Grove-Temperature_Sensor_V1.2/). You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC. A Grove Temperature sensor and Pynq Grove Adapter, or Pynq Shield is required. The Grove Temperature Sensor, Pynq Grove Adapter, and Grove I2C ADC are used for this example. You can read a single value of temperature or read multiple values at regular intervals for a desired duration.At the end of this notebook, a Python only solution with single-sample read functionality is provided. 1. Load overlay
###Code
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
###Output
_____no_output_____
###Markdown
2. Read single temperatureThis example shows how to get a single temperature sample from the Grove TMP sensor. The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged into the 1st PMOD labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC. Grove ADC provides a raw sample, which is converted into resistance first and then converted into temperature.
###Code
import math
from pynq.lib.pmod import Grove_TMP
from pynq.lib.pmod import PMOD_GROVE_G4
tmp = Grove_TMP(base.PMODB,PMOD_GROVE_G4)
temperature = tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
28.2 degree Celsius
###Markdown
3. Start logging once every 100ms for 10 secondsExecuting the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touch/hold the temperature sensor to vary the measured temperature.You can vary the logging interval and the duration by changing the values 100 and 10 in the cell below. The raw samples are stored in the internal memory, and converted into temperature values.
###Code
import time
%matplotlib inline
import matplotlib.pyplot as plt
tmp.set_log_interval_ms(100)
tmp.start_log()
# Change input during this time
time.sleep(10)
tmp_log = tmp.get_log()
plt.plot(range(len(tmp_log)), tmp_log, 'ro')
plt.title('Grove Temperature Plot')
min_tmp_log = min(tmp_log)
max_tmp_log = max(tmp_log)
plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log])
plt.show()
###Output
_____no_output_____
###Markdown
4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IICThis class is ported from http://wiki.seeedstudio.com/Grove-Temperature_Sensor/
###Code
from time import sleep
from math import log
from pynq.lib.pmod import PMOD_GROVE_G3
from pynq.lib.pmod import PMOD_GROVE_G4
from pynq.lib import Pmod_IIC
class Python_Grove_TMP(Pmod_IIC):
"""This class controls the grove temperature sensor.
This class inherits from the PMODIIC class.
Attributes
----------
iop : _IOP
The _IOP object returned from the DevMode.
scl_pin : int
The SCL pin number.
sda_pin : int
The SDA pin number.
iic_addr : int
The IIC device address.
"""
def __init__(self, pmod_id, gr_pins, model = 'v1.2'):
"""Return a new instance of a grove OLED object.
Parameters
----------
pmod_id : int
The PMOD ID (1, 2) corresponding to (PMODA, PMODB).
gr_pins: list
The group pins on Grove Adapter. G3 or G4 is valid.
model : string
Temperature sensor model (can be found on the device).
"""
if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]:
[scl_pin,sda_pin] = gr_pins
else:
raise ValueError("Valid group numbers are G3 and G4.")
# Each revision has its own B value
if model == 'v1.2':
# v1.2 uses thermistor NCP18WF104F03RC
self.bValue = 4250
elif model == 'v1.1':
# v1.1 uses thermistor NCP18WF104F03RC
self.bValue = 4250
else:
# v1.0 uses thermistor TTC3A103*39H
self.bValue = 3975
super().__init__(pmod_id, scl_pin, sda_pin, 0x50)
# Initialize the Grove ADC
self.send([0x2,0x20]);
def read(self):
"""Read temperature in Celsius from grove temperature sensor.
Parameters
----------
None
Returns
-------
float
Temperature reading in Celsius.
"""
val = self._read_grove_adc()
R = 4095.0/val - 1.0
temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15
return temp
def _read_grove_adc(self):
self.send([0])
bytes = self.receive(2)
return 2*(((bytes[0] & 0x0f) << 8) | bytes[1])
from pynq import PL
# Flush IOP state
PL.reset()
py_tmp = Python_Grove_TMP(base.PMODB, PMOD_GROVE_G4)
temperature = py_tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
30.54 degree Celsius
###Markdown
Grove Temperature Sensor 1.2This example shows how to use the [Grove Temperature Sensor v1.2](http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor_V1.2). You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC. A [Grove Temperature sensor](http://www.seeedstudio.com/depot/grove-led-bar-p-1178.html) and Pynq Grove Adapter, or Pynq Shield is required. The Grove Temperature Sensor, Pynq Grove Adapter, and Grove I2C ADC are used for this example. You can read a single value of temperature or read multiple values at regular intervals for a desired duration.At the end of this notebook, a Python only solution with single-sample read functionality is provided. 1. Load overlay
###Code
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
###Output
_____no_output_____
###Markdown
2. Read single temperatureThis example shows how to get a single temperature sample from the Grove TMP sensor. The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged into the 1st PMOD labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC. Grove ADC provides a raw sample, which is converted into resistance first and then converted into temperature.
###Code
import math
from pynq.lib.pmod import Grove_TMP
from pynq.lib.pmod import PMOD_GROVE_G4
tmp = Grove_TMP(base.PMODB,PMOD_GROVE_G4)
temperature = tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
28.2 degree Celsius
###Markdown
3. Start logging once every 100ms for 10 secondsExecuting the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touching/holding the temperature sensor to vary the measured temperature. You can vary the logging interval and the duration by changing the values 100 and 10 in the cell below. The raw samples are stored in the internal memory, and converted into temperature values.
###Code
import time
%matplotlib inline
import matplotlib.pyplot as plt
tmp.set_log_interval_ms(100)
tmp.start_log()
# Change input during this time
time.sleep(10)
tmp_log = tmp.get_log()
plt.plot(range(len(tmp_log)), tmp_log, 'ro')
plt.title('Grove Temperature Plot')
min_tmp_log = min(tmp_log)
max_tmp_log = max(tmp_log)
plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log])
plt.show()
###Output
_____no_output_____
###Markdown
4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IICThis class is ported from http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor.
###Code
from time import sleep
from math import log
from pynq.lib.pmod import PMOD_GROVE_G3
from pynq.lib.pmod import PMOD_GROVE_G4
from pynq.lib import Pmod_IIC
class Python_Grove_TMP(Pmod_IIC):
"""This class controls the grove temperature sensor.
This class inherits from the PMODIIC class.
Attributes
----------
iop : _IOP
The _IOP object returned from the DevMode.
scl_pin : int
The SCL pin number.
sda_pin : int
The SDA pin number.
iic_addr : int
The IIC device address.
"""
def __init__(self, pmod_id, gr_pins, model = 'v1.2'):
"""Return a new instance of a grove OLED object.
Parameters
----------
pmod_id : int
The PMOD ID (1, 2) corresponding to (PMODA, PMODB).
gr_pins: list
The group pins on Grove Adapter. G3 or G4 is valid.
model : string
Temperature sensor model (can be found on the device).
"""
if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]:
[scl_pin,sda_pin] = gr_pins
else:
raise ValueError("Valid group numbers are G3 and G4.")
# Each revision has its own B value
if model == 'v1.2':
# v1.2 uses thermistor NCP18WF104F03RC
self.bValue = 4250
elif model == 'v1.1':
# v1.1 uses thermistor NCP18WF104F03RC
self.bValue = 4250
else:
# v1.0 uses thermistor TTC3A103*39H
self.bValue = 3975
super().__init__(pmod_id, scl_pin, sda_pin, 0x50)
# Initialize the Grove ADC
self.send([0x2,0x20]);
def read(self):
"""Read temperature in Celsius from grove temperature sensor.
Parameters
----------
None
Returns
-------
float
Temperature reading in Celsius.
"""
val = self._read_grove_adc()
R = 4095.0/val - 1.0
temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15
return temp
def _read_grove_adc(self):
self.send([0])
bytes = self.receive(2)
return 2*(((bytes[0] & 0x0f) << 8) | bytes[1])
from pynq import PL
# Flush IOP state
PL.reset()
py_tmp = Python_Grove_TMP(base.PMODB, PMOD_GROVE_G4)
temperature = py_tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
###Output
30.54 degree Celsius
|
assignment3/Generative_Adversarial_Networks_TF.ipynb | ###Markdown
Generative Adversarial Networks (GANs)So far in CS231N, all the applications of neural networks that we have explored have been **discriminative models** that take an input and are trained to produce a labeled output. This has ranged from straightforward classification of image categories to sentence generation (which was still phrased as a classification problem, our labels were in vocabulary space and we’d learned a recurrence to capture multi-word labels). In this notebook, we will expand our repertoire, and build **generative models** using neural networks. Specifically, we will learn how to build models which generate novel images that resemble a set of training images. What is a GAN?In 2014, [Goodfellow et al.](https://arxiv.org/abs/1406.2661) presented a method for training generative models called Generative Adversarial Networks (GANs for short). In a GAN, we build two different neural networks. Our first network is a traditional classification network, called the **discriminator**. We will train the discriminator to take images, and classify them as being real (belonging to the training set) or fake (not present in the training set). Our other network, called the **generator**, will take random noise as input and transform it using a neural network to produce images. The goal of the generator is to fool the discriminator into thinking the images it produced are real.We can think of this back and forth process of the generator ($G$) trying to fool the discriminator ($D$), and the discriminator trying to correctly classify real vs. fake as a minimax game:$$\underset{G}{\text{minimize}}\; \underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$where $x \sim p_\text{data}$ are samples from the input data, $z \sim p(z)$ are the random noise samples, $G(z)$ are the generated images using the neural network generator $G$, and $D$ is the output of the discriminator, specifying the probability of an input being real. In [Goodfellow et al.](https://arxiv.org/abs/1406.2661), they analyze this minimax game and show how it relates to minimizing the Jensen-Shannon divergence between the training data distribution and the generated samples from $G$.To optimize this minimax game, we will alternate between taking gradient *descent* steps on the objective for $G$, and gradient *ascent* steps on the objective for $D$:1. update the **generator** ($G$) to minimize the probability of the __discriminator making the correct choice__. 2. update the **discriminator** ($D$) to maximize the probability of the __discriminator making the correct choice__.While these updates are useful for analysis, they do not perform well in practice. Instead, we will use a different objective when we update the generator: maximize the probability of the **discriminator making the incorrect choice**. This small change helps to alleviate problems with the generator gradient vanishing when the discriminator is confident. This is the standard update used in most GAN papers, and was used in the original paper from [Goodfellow et al.](https://arxiv.org/abs/1406.2661). In this assignment, we will alternate the following updates:1. Update the generator ($G$) to maximize the probability of the discriminator making the incorrect choice on generated data:$$\underset{G}{\text{maximize}}\; \mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$2.
Update the discriminator ($D$) to maximize the probability of the discriminator making the correct choice on real and generated data:$$\underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$ What else is there?Since 2014, GANs have exploded into a huge research area, with massive [workshops](https://sites.google.com/site/nips2016adversarial/), and [hundreds of new papers](https://github.com/hindupuravinash/the-gan-zoo). Compared to other approaches for generative models, they often produce the highest quality samples but are some of the most difficult and finicky models to train (see [this github repo](https://github.com/soumith/ganhacks) that contains a set of 17 hacks that are useful for getting models working). Improving the stability and robustness of GAN training is an open research question, with new papers coming out every day! For a more recent tutorial on GANs, see [here](https://arxiv.org/abs/1701.00160). There is also some even more recent exciting work that changes the objective function to Wasserstein distance and yields much more stable results across model architectures: [WGAN](https://arxiv.org/abs/1701.07875), [WGAN-GP](https://arxiv.org/abs/1704.00028).GANs are not the only way to train a generative model! For other approaches to generative modeling check out the [deep generative model chapter](http://www.deeplearningbook.org/contents/generative_models.html) of the Deep Learning [book](http://www.deeplearningbook.org). Another popular way of training neural networks as generative models is Variational Autoencoders (co-discovered [here](https://arxiv.org/abs/1312.6114) and [here](https://arxiv.org/abs/1401.4082)). Variational autoencoders combine neural networks with variational inference to train deep generative models. These models tend to be far more stable and easier to train but currently don't produce samples that are as pretty as GANs.Example pictures of what you should expect (yours might look slightly different): Setup
###Code
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# A bunch of utility functions
def show_images(images):
images = np.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(np.ceil(np.sqrt(images.shape[0])))
sqrtimg = int(np.ceil(np.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(img.reshape([sqrtimg,sqrtimg]))
return
def preprocess_img(x):
return 2 * x - 1.0
def deprocess_img(x):
return (x + 1.0) / 2.0
def rel_error(x,y):
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def count_params(model):
"""Count the number of parameters in the current TensorFlow graph """
param_count = np.sum([np.prod(p.shape) for p in model.weights])
return param_count
answers = np.load('gan-checks-tf.npz')
NOISE_DIM = 96
###Output
_____no_output_____
###Markdown
Dataset GANs are notoriously finicky with hyperparameters, and also require many training epochs. In order to make this assignment approachable without a GPU, we will be working on the MNIST dataset, which is 60,000 training and 10,000 test images. Each picture contains a centered image of a white digit on a black background (0 through 9). This was one of the first datasets used to train convolutional neural networks and it is fairly easy -- a standard CNN model can easily exceed 99% accuracy. **Heads-up**: Our MNIST wrapper returns images as vectors. That is, they're size (batch, 784). If you want to treat them as images, you have to reshape them to (batch,28,28) or (batch,28,28,1). They are also type np.float32 and bounded [0,1].
###Code
class MNIST(object):
def __init__(self, batch_size, shuffle=False):
"""
Construct an iterator object over the MNIST data
Inputs:
- batch_size: Integer giving number of elements per minibatch
- shuffle: (optional) Boolean, whether to shuffle the data on each epoch
"""
train, _ = tf.keras.datasets.mnist.load_data()
X, y = train
X = X.astype(np.float32)/255
X = X.reshape((X.shape[0], -1))
self.X, self.y = X, y
self.batch_size, self.shuffle = batch_size, shuffle
def __iter__(self):
N, B = self.X.shape[0], self.batch_size
idxs = np.arange(N)
if self.shuffle:
np.random.shuffle(idxs)
        # Index with the (possibly shuffled) idxs so that shuffle=True actually
        # changes the batch order; plain slices would ignore the permutation.
        return iter((self.X[idxs[i:i+B]], self.y[idxs[i:i+B]]) for i in range(0, N, B))
# show a batch
mnist = MNIST(batch_size=16)
show_images(mnist.X[:16])
###Output
_____no_output_____
###Markdown
LeakyReLUIn the cell below, you should implement a LeakyReLU. See the [class notes](http://cs231n.github.io/neural-networks-1/) (where alpha is a small number) or equation (3) in [this paper](http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). LeakyReLUs keep ReLU units from dying and are often used in GAN methods (as are maxout units; however, those increase model size and are therefore not used in this notebook).HINT: You should be able to use `tf.maximum`
###Code
def leaky_relu(x, alpha=0.01):
"""Compute the leaky ReLU activation function.
Inputs:
- x: TensorFlow Tensor with arbitrary shape
- alpha: leak parameter for leaky ReLU
Returns:
TensorFlow Tensor with the same shape as x
"""
# TODO: implement leaky ReLU
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###Output
_____no_output_____
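###Markdown
For reference, here is one way the cell above could be filled in: a minimal sketch built on the hinted `tf.maximum` (the name `leaky_relu_reference` is illustrative, not part of the assignment).
###Code
def leaky_relu_reference(x, alpha=0.01):
    # For 0 < alpha < 1, max(x, alpha*x) equals x when x >= 0 and alpha*x
    # otherwise, which is exactly the leaky ReLU.
    return tf.maximum(x, alpha * x)
###Output
_____no_output_____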
###Markdown
Test your leaky ReLU implementation. You should get errors < 1e-10
###Code
def test_leaky_relu(x, y_true):
y = leaky_relu(tf.constant(x))
print('Maximum error: %g'%rel_error(y_true, y))
test_leaky_relu(answers['lrelu_x'], answers['lrelu_y'])
###Output
_____no_output_____
###Markdown
Random NoiseGenerate a TensorFlow `Tensor` containing uniform noise from -1 to 1 with shape `[batch_size, dim]`.
###Code
def sample_noise(batch_size, dim):
"""Generate random uniform noise from -1 to 1.
Inputs:
- batch_size: integer giving the batch size of noise to generate
- dim: integer giving the dimension of the noise to generate
Returns:
TensorFlow Tensor containing uniform noise in [-1, 1] with shape [batch_size, dim]
"""
# TODO: sample and return noise
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###Output
_____no_output_____
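###Markdown
One possible implementation of the sampler above (a sketch; `sample_noise_reference` is an illustrative name):
###Code
def sample_noise_reference(batch_size, dim):
    # tf.random.uniform draws from [minval, maxval), so all values land in [-1, 1].
    return tf.random.uniform([batch_size, dim], minval=-1, maxval=1)
###Output
_____no_output_____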
###Markdown
Make sure noise is the correct shape and type:
###Code
def test_sample_noise():
batch_size = 3
dim = 4
z = sample_noise(batch_size, dim)
# Check z has the correct shape
assert z.get_shape().as_list() == [batch_size, dim]
# Make sure z is a Tensor and not a numpy array
assert isinstance(z, tf.Tensor)
# Check that we get different noise for different evaluations
z1 = sample_noise(batch_size, dim)
z2 = sample_noise(batch_size, dim)
assert not np.array_equal(z1, z2)
# Check that we get the correct range
assert np.all(z1 >= -1.0) and np.all(z1 <= 1.0)
print("All tests passed!")
test_sample_noise()
###Output
_____no_output_____
###Markdown
DiscriminatorOur first step is to build a discriminator. **Hint:** You should use the layers in `tf.keras.layers` to build the model.All fully connected layers should include bias terms. For initialization, just use the default initializer used by the `tf.keras.layers` functions.Architecture: * Fully connected layer with input size 784 and output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 1 The output of the discriminator should thus have shape `[batch_size, 1]`, and contain real numbers corresponding to the scores that each of the `batch_size` inputs is a real image.
###Code
def discriminator():
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
###Output
_____no_output_____
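###Markdown
One layer stack that matches the spec above (a sketch, not necessarily the graded solution; `discriminator_reference` is an illustrative name):
###Code
def discriminator_reference():
    # 784 -> 256 -> 256 -> 1 with LeakyReLU(0.01) after the first two Dense
    # layers; Dense includes a bias by default, giving 267,009 parameters.
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(256, input_shape=(784,)),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.Dense(256),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.Dense(1),
    ])
###Output
_____no_output_____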
###Markdown
Test to make sure the number of parameters in the discriminator is correct:
###Code
def test_discriminator(true_count=267009):
model = discriminator()
cur_count = count_params(model)
if cur_count != true_count:
        print('Incorrect number of parameters in discriminator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in discriminator.')
test_discriminator()
###Output
_____no_output_____
###Markdown
GeneratorNow to build a generator. You should use the layers in `tf.keras.layers` to construct the model. All fully connected layers should include bias terms. Note that you can use the tf.nn module to access activation functions. Once again, use the default initializers for parameters.Architecture: * Fully connected layer with input size tf.shape(z)[1] (the number of noise dimensions) and output size 1024 * `ReLU` * Fully connected layer with output size 1024 * `ReLU` * Fully connected layer with output size 784 * `TanH` (To restrict every element of the output to be in the range [-1,1])
###Code
def generator(noise_dim=NOISE_DIM):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
###Output
_____no_output_____
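###Markdown
A matching generator sketch (activations passed inline via the `activation` argument; `generator_reference` is an illustrative name):
###Code
def generator_reference(noise_dim=NOISE_DIM):
    # noise_dim -> 1024 -> 1024 -> 784, with ReLU on the hidden layers and tanh
    # on the output so every pixel lands in [-1, 1].
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(1024, activation='relu', input_shape=(noise_dim,)),
        tf.keras.layers.Dense(1024, activation='relu'),
        tf.keras.layers.Dense(784, activation='tanh'),
    ])
###Output
_____no_output_____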
###Markdown
Test to make sure the number of parameters in the generator is correct:
###Code
def test_generator(true_count=1858320):
model = generator(4)
cur_count = count_params(model)
if cur_count != true_count:
        print('Incorrect number of parameters in generator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in generator.')
test_generator()
###Output
_____no_output_____
###Markdown
GAN LossCompute the generator and discriminator loss. The generator loss is:$$\ell_G = -\mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$and the discriminator loss is:$$ \ell_D = -\mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] - \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$Note that these are negated from the equations presented earlier as we will be *minimizing* these losses.**HINTS**: Use [tf.ones](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/ones) and [tf.zeros](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/zeros) to generate labels for your discriminator. Use [tf.keras.losses.BinaryCrossentropy](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/losses/BinaryCrossentropy) to help compute your loss function.
###Code
def discriminator_loss(logits_real, logits_fake):
"""
Computes the discriminator loss described above.
Inputs:
- logits_real: Tensor of shape (N, 1) giving scores for the real data.
- logits_fake: Tensor of shape (N, 1) giving scores for the fake data.
Returns:
- loss: Tensor containing (scalar) the loss for the discriminator.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
def generator_loss(logits_fake):
"""
Computes the generator loss described above.
Inputs:
    - logits_fake: TensorFlow Tensor of shape (N, 1) giving scores for the fake data.
    Returns:
    - loss: TensorFlow Tensor containing the (scalar) loss for the generator.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
###Output
_____no_output_____
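###Markdown
One way to assemble both losses from the hinted helpers (a sketch; the `bce` instance and the `_reference` names are illustrative). `from_logits=True` lets the loss apply the sigmoid internally, so the raw discriminator scores can be passed straight in.
###Code
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss_reference(logits_real, logits_fake):
    # Real images are labeled 1 and fakes 0; BinaryCrossentropy averages over
    # the minibatch, which stands in for the expectations above.
    real_loss = bce(tf.ones_like(logits_real), logits_real)
    fake_loss = bce(tf.zeros_like(logits_fake), logits_fake)
    return real_loss + fake_loss

def generator_loss_reference(logits_fake):
    # The generator wants its samples scored as real (label 1).
    return bce(tf.ones_like(logits_fake), logits_fake)
###Output
_____no_output_____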
###Markdown
Test your GAN loss. Make sure both the generator and discriminator loss are correct. You should see errors less than 1e-8.
###Code
def test_discriminator_loss(logits_real, logits_fake, d_loss_true):
d_loss = discriminator_loss(tf.constant(logits_real),
tf.constant(logits_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
test_discriminator_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_true'])
def test_generator_loss(logits_fake, g_loss_true):
g_loss = generator_loss(tf.constant(logits_fake))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_generator_loss(answers['logits_fake'], answers['g_loss_true'])
###Output
_____no_output_____
###Markdown
Optimizing our lossMake an `Adam` optimizer with a 1e-3 learning rate and beta1=0.5 to minimize G_loss and D_loss separately. The trick of decreasing beta was shown to be effective in helping GANs converge in the [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498) paper. In fact, with our current hyperparameters, if you set beta1 to the Tensorflow default of 0.9, there's a good chance your discriminator loss will go to zero and the generator will fail to learn entirely. Indeed, this is a common failure mode in GANs; if your D(x) learns too fast (e.g. loss goes near zero), your G(z) is never able to learn. Often D(x) is trained with SGD with Momentum or RMSProp instead of Adam, but here we'll use Adam for both D(x) and G(z).
###Code
# TODO: create an AdamOptimizer for D_solver and G_solver
def get_solvers(learning_rate=1e-3, beta1=0.5):
"""Create solvers for GAN training.
Inputs:
- learning_rate: learning rate to use for both solvers
- beta1: beta1 parameter for both solvers (first moment decay)
Returns:
- D_solver: instance of tf.optimizers.Adam with correct learning_rate and beta1
- G_solver: instance of tf.optimizers.Adam with correct learning_rate and beta1
"""
D_solver = None
G_solver = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return D_solver, G_solver
###Output
_____no_output_____
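###Markdown
A minimal sketch of the solver factory described above: two independent `Adam` instances so the moment estimates for D and G never mix (`get_solvers_reference` is an illustrative name).
###Code
def get_solvers_reference(learning_rate=1e-3, beta1=0.5):
    # beta_1=0.5 follows the Improved Techniques for Training GANs advice.
    D_solver = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta1)
    G_solver = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta1)
    return D_solver, G_solver
###Output
_____no_output_____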
###Markdown
Training a GAN!Well that wasn't so hard, was it? After the first epoch, you should see fuzzy outlines, clear shapes as you approach epoch 3, and decent shapes, about half of which will be sharp and clearly recognizable as we pass epoch 5. In our case, we'll simply train D(x) and G(z) with one batch each every iteration. However, papers often experiment with different schedules of training D(x) and G(z), sometimes doing one for more steps than the other, or even training each one until the loss gets "good enough" and then switching to training the other.
###Code
# a giant helper function
def run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss,\
show_every=20, print_every=20, batch_size=128, num_epochs=10, noise_size=96):
"""Train a GAN for a certain number of epochs.
Inputs:
- D: Discriminator model
- G: Generator model
- D_solver: an Optimizer for Discriminator
- G_solver: an Optimizer for Generator
- generator_loss: Generator loss
- discriminator_loss: Discriminator loss
Returns:
Nothing
"""
mnist = MNIST(batch_size=batch_size, shuffle=True)
iter_count = 0
for epoch in range(num_epochs):
for (x, _) in mnist:
with tf.GradientTape() as tape:
real_data = x
logits_real = D(preprocess_img(real_data))
g_fake_seed = sample_noise(batch_size, noise_size)
fake_images = G(g_fake_seed)
logits_fake = D(tf.reshape(fake_images, [batch_size, 784]))
d_total_error = discriminator_loss(logits_real, logits_fake)
d_gradients = tape.gradient(d_total_error, D.trainable_variables)
D_solver.apply_gradients(zip(d_gradients, D.trainable_variables))
with tf.GradientTape() as tape:
g_fake_seed = sample_noise(batch_size, noise_size)
fake_images = G(g_fake_seed)
gen_logits_fake = D(tf.reshape(fake_images, [batch_size, 784]))
g_error = generator_loss(gen_logits_fake)
g_gradients = tape.gradient(g_error, G.trainable_variables)
G_solver.apply_gradients(zip(g_gradients, G.trainable_variables))
if (iter_count % show_every == 0):
print('Epoch: {}, Iter: {}, D: {:.4}, G:{:.4}'.format(epoch, iter_count,d_total_error,g_error))
imgs_numpy = fake_images.cpu().numpy()
show_images(imgs_numpy[0:16])
plt.show()
iter_count += 1
# random noise fed into our generator
z = sample_noise(batch_size, noise_size)
# generated images
G_sample = G(z)
print('Final images')
show_images(G_sample[:16])
plt.show()
###Output
_____no_output_____
###Markdown
Train your GAN! This should take about 10 minutes on a CPU, or about 2 minutes on GPU.
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss)
###Output
_____no_output_____
###Markdown
Least Squares GANWe'll now look at [Least Squares GAN](https://arxiv.org/abs/1611.04076), a newer, more stable alternative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement equation (9) in the paper, with the generator loss:$$\ell_G = \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[\left(D(G(z))-1\right)^2\right]$$and the discriminator loss:$$ \ell_D = \frac{1}{2}\mathbb{E}_{x \sim p_\text{data}}\left[\left(D(x)-1\right)^2\right] + \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[ \left(D(G(z))\right)^2\right]$$**HINTS**: Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. When plugging in for $D(x)$ and $D(G(z))$ use the direct output from the discriminator (`score_real` and `score_fake`).
###Code
def ls_discriminator_loss(scores_real, scores_fake):
"""
Compute the Least-Squares GAN loss for the discriminator.
Inputs:
- scores_real: Tensor of shape (N, 1) giving scores for the real data.
- scores_fake: Tensor of shape (N, 1) giving scores for the fake data.
Outputs:
- loss: A Tensor containing the loss.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
def ls_generator_loss(scores_fake):
"""
Computes the Least-Squares GAN loss for the generator.
Inputs:
- scores_fake: Tensor of shape (N, 1) giving scores for the fake data.
Outputs:
- loss: A Tensor containing the loss.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
###Output
_____no_output_____
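###Markdown
A sketch of equation (9) using plain tensor ops, averaging over the minibatch per the hint (the `_reference` names are illustrative):
###Code
def ls_discriminator_loss_reference(scores_real, scores_fake):
    # 0.5*E[(D(x)-1)^2] + 0.5*E[D(G(z))^2], with means in place of expectations.
    return (0.5 * tf.reduce_mean((scores_real - 1.0) ** 2)
            + 0.5 * tf.reduce_mean(scores_fake ** 2))

def ls_generator_loss_reference(scores_fake):
    # 0.5*E[(D(G(z))-1)^2]: push fake scores toward the "real" target of 1.
    return 0.5 * tf.reduce_mean((scores_fake - 1.0) ** 2)
###Output
_____no_output_____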
###Markdown
Test your LSGAN loss. You should see errors less than 1e-8.
###Code
def test_lsgan_loss(score_real, score_fake, d_loss_true, g_loss_true):
d_loss = ls_discriminator_loss(tf.constant(score_real), tf.constant(score_fake))
g_loss = ls_generator_loss(tf.constant(score_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_lsgan_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_lsgan_true'], answers['g_loss_lsgan_true'])
###Output
_____no_output_____
###Markdown
Create new training steps so we instead minimize the LSGAN loss:
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, ls_discriminator_loss, ls_generator_loss)
###Output
_____no_output_____
###Markdown
Deep Convolutional GANsIn the first part of the notebook, we implemented an almost direct copy of the original GAN network from Ian Goodfellow. However, this network architecture allows no real spatial reasoning. It is unable to reason about things like "sharp edges" in general because it lacks any convolutional layers. Thus, in this section, we will implement some of the ideas from [DCGAN](https://arxiv.org/abs/1511.06434), where we use convolutional networks as our discriminators and generators. DiscriminatorWe will use a discriminator inspired by the TensorFlow MNIST classification [tutorial](https://www.tensorflow.org/get_started/mnist/pros), which is able to get above 99% accuracy on the MNIST dataset fairly quickly. *Be sure to check the dimensions of x and reshape when needed*; fully connected blocks expect [N,D] Tensors while conv2d blocks expect [N,H,W,C] Tensors. Please use `tf.keras.layers` to define the following architecture:Architecture:* Conv2D: 32 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Conv2D: 64 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Flatten* Fully Connected with output size 4 x 4 x 64* Leaky ReLU(alpha=0.01)* Fully Connected with output size 1Once again, please use biases for all convolutional and fully connected layers, and use the default parameter initializers. Note that a padding of 0 can be accomplished with the 'VALID' padding option.
###Code
def discriminator():
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
model = discriminator()
test_discriminator(1102721)
###Output
_____no_output_____
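###Markdown
One stack matching the DCGAN discriminator spec above (a sketch; the flat input is reshaped to [28, 28, 1] before the convolutions, and `dc_discriminator_reference` is an illustrative name):
###Code
def dc_discriminator_reference():
    # Spatial sizes: 28 -> 24 (conv, VALID) -> 12 (pool) -> 8 (conv) -> 4 (pool),
    # so Flatten yields 4*4*64 = 1024 features and 1,102,721 parameters total.
    return tf.keras.models.Sequential([
        tf.keras.layers.Reshape((28, 28, 1), input_shape=(784,)),
        tf.keras.layers.Conv2D(32, 5, strides=1, padding='valid'),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Conv2D(64, 5, strides=1, padding='valid'),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(4 * 4 * 64),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.Dense(1),
    ])
###Output
_____no_output_____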
###Markdown
GeneratorFor the generator, we will copy the architecture exactly from the [InfoGAN paper](https://arxiv.org/pdf/1606.03657.pdf). See Appendix C.1 MNIST. Please use `tf.keras.layers` for your implementation. You might find the documentation for [tf.keras.layers.Conv2DTranspose](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Conv2DTranspose) useful. The architecture is as follows.Architecture:* Fully connected with output size 1024 * `ReLU`* BatchNorm* Fully connected with output size 7 x 7 x 128 * `ReLU`* BatchNorm* Resize into Image Tensor of size 7, 7, 128* Conv2D^T (transpose): 64 filters of 4x4, stride 2* `ReLU`* BatchNorm* Conv2d^T (transpose): 1 filter of 4x4, stride 2* `TanH`Once again, use biases for the fully connected and transpose convolutional layers. Please use the default initializers for your parameters. For padding, choose the 'same' option for transpose convolutions. For Batch Normalization, assume we are always in 'training' mode.
###Code
def generator(noise_dim=NOISE_DIM):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
model = tf.keras.models.Sequential()
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return model
test_generator(6595521)
###Output
_____no_output_____
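###Markdown
A sketch of the InfoGAN-style generator above, with ReLU applied before each BatchNorm per the listed order (`dc_generator_reference` is an illustrative name):
###Code
def dc_generator_reference(noise_dim=NOISE_DIM):
    # Project the noise up to a 7x7x128 feature map, then upsample 7 -> 14 -> 28
    # with stride-2 transpose convolutions ('same' padding doubles H and W).
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(1024, activation='relu', input_shape=(noise_dim,)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(7 * 7 * 128, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Reshape((7, 7, 128)),
        tf.keras.layers.Conv2DTranspose(64, 4, strides=2, padding='same',
                                        activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2DTranspose(1, 4, strides=2, padding='same',
                                        activation='tanh'),
    ])
###Output
_____no_output_____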
###Markdown
We have to recreate our network since we've changed our functions. Train and evaluate a DCGANThis is the one part of A3 that significantly benefits from using a GPU. It takes about 3 minutes on a GPU for the requested five epochs, or about 50 minutes on a dual-core laptop CPU (feel free to use 3 epochs if you do it on CPU).
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss, num_epochs=5)
###Output
_____no_output_____
###Markdown
Generative Adversarial Networks (GANs)So far in CS231N, all the applications of neural networks that we have explored have been **discriminative models** that take an input and are trained to produce a labeled output. This has ranged from straightforward classification of image categories to sentence generation (which was still phrased as a classification problem, our labels were in vocabulary space and we’d learned a recurrence to capture multi-word labels). In this notebook, we will expand our repertoire and build **generative models** using neural networks. Specifically, we will learn how to build models which generate novel images that resemble a set of training images. What is a GAN?In 2014, [Goodfellow et al.](https://arxiv.org/abs/1406.2661) presented a method for training generative models called Generative Adversarial Networks (GANs for short). In a GAN, we build two different neural networks. Our first network is a traditional classification network, called the **discriminator**. We will train the discriminator to take images, and classify them as being real (belonging to the training set) or fake (not present in the training set). Our other network, called the **generator**, will take random noise as input and transform it using a neural network to produce images. The goal of the generator is to fool the discriminator into thinking the images it produced are real.We can think of this back and forth process of the generator ($G$) trying to fool the discriminator ($D$), and the discriminator trying to correctly classify real vs. fake as a minimax game:$$\underset{G}{\text{minimize}}\; \underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$where $x \sim p_\text{data}$ are samples from the input data, $z \sim p(z)$ are the random noise samples, $G(z)$ are the generated images using the neural network generator $G$, and $D$ is the output of the discriminator, specifying the probability of an input being real. In [Goodfellow et al.](https://arxiv.org/abs/1406.2661), they analyze this minimax game and show how it relates to minimizing the Jensen-Shannon divergence between the training data distribution and the generated samples from $G$.To optimize this minimax game, we will alternate between taking gradient *descent* steps on the objective for $G$, and gradient *ascent* steps on the objective for $D$:1. update the **generator** ($G$) to minimize the probability of the __discriminator making the correct choice__. 2. update the **discriminator** ($D$) to maximize the probability of the __discriminator making the correct choice__.While these updates are useful for analysis, they do not perform well in practice. Instead, we will use a different objective when we update the generator: maximize the probability of the **discriminator making the incorrect choice**. This small change helps to alleviate problems with the generator gradient vanishing when the discriminator is confident. This is the standard update used in most GAN papers, and was used in the original paper from [Goodfellow et al.](https://arxiv.org/abs/1406.2661). In this assignment, we will alternate the following updates:1. Update the generator ($G$) to maximize the probability of the discriminator making the incorrect choice on generated data:$$\underset{G}{\text{maximize}}\; \mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$2. 
Update the discriminator ($D$) to maximize the probability of the discriminator making the correct choice on real and generated data:$$\underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$ What else is there?Since 2014, GANs have exploded into a huge research area, with massive [workshops](https://sites.google.com/site/nips2016adversarial/), and [hundreds of new papers](https://github.com/hindupuravinash/the-gan-zoo). Compared to other approaches for generative models, they often produce the highest quality samples but are some of the most difficult and finicky models to train (see [this github repo](https://github.com/soumith/ganhacks) that contains a set of 17 hacks that are useful for getting models working). Improving the stability and robustness of GAN training is an open research question, with new papers coming out every day! For a more recent tutorial on GANs, see [here](https://arxiv.org/abs/1701.00160). There is also some even more recent exciting work that changes the objective function to Wasserstein distance and yields much more stable results across model architectures: [WGAN](https://arxiv.org/abs/1701.07875), [WGAN-GP](https://arxiv.org/abs/1704.00028).GANs are not the only way to train a generative model! For other approaches to generative modeling check out the [deep generative model chapter](http://www.deeplearningbook.org/contents/generative_models.html) of the Deep Learning [book](http://www.deeplearningbook.org). Another popular way of training neural networks as generative models is Variational Autoencoders (co-discovered [here](https://arxiv.org/abs/1312.6114) and [here](https://arxiv.org/abs/1401.4082)). Variational autoencoders combine neural networks with variational inference to train deep generative models. These models tend to be far more stable and easier to train but currently don't produce samples that are as pretty as GANs.Example pictures of what you should expect (yours might look slightly different): Setup
###Code
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# A bunch of utility functions
def show_images(images):
images = np.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(np.ceil(np.sqrt(images.shape[0])))
sqrtimg = int(np.ceil(np.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(img.reshape([sqrtimg,sqrtimg]))
return
from cs231n.gan_tf import preprocess_img, deprocess_img, rel_error, count_params, MNIST
NOISE_DIM = 96
# Colab users only
%cd drive/My\ Drive/$FOLDERNAME/
%cp -r gan-checks-tf.npz /content/
%cd /content/
answers = np.load('gan-checks-tf.npz')
###Output
_____no_output_____
###Markdown
Dataset GANs are notoriously finicky with hyperparameters, and also require many training epochs. In order to make this assignment approachable without a GPU, we will be working on the MNIST dataset, which is 60,000 training and 10,000 test images. Each picture contains a centered image of a white digit on a black background (0 through 9). This was one of the first datasets used to train convolutional neural networks and it is fairly easy -- a standard CNN model can easily exceed 99% accuracy. **Heads-up**: Our MNIST wrapper returns images as vectors. That is, they're size (batch, 784). If you want to treat them as images, you have to reshape them to (batch,28,28) or (batch,28,28,1). They are also type np.float32 and bounded [0,1].
###Code
# show a batch
mnist = MNIST(batch_size=16)
show_images(mnist.X[:16])
###Output
_____no_output_____
###Markdown
LeakyReLUIn the cell below, you should implement a LeakyReLU. See the [class notes](http://cs231n.github.io/neural-networks-1/) (where alpha is a small number) or equation (3) in [this paper](http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). LeakyReLUs keep ReLU units from dying and are often used in GAN methods (as are maxout units; however, those increase model size and are therefore not used in this notebook).HINT: You should be able to use `tf.maximum`Implement `leaky_relu` in `cs231n/gan_tf.py` Test your leaky ReLU implementation. You should get errors < 1e-10
###Code
from cs231n.gan_tf import leaky_relu
def test_leaky_relu(x, y_true):
y = leaky_relu(tf.constant(x))
print('Maximum error: %g'%rel_error(y_true, y))
test_leaky_relu(answers['lrelu_x'], answers['lrelu_y'])
###Output
_____no_output_____
###Markdown
Random NoiseGenerate a TensorFlow `Tensor` containing uniform noise from -1 to 1 with shape `[batch_size, dim]`.Implement `sample_noise` in `cs231n/gan_tf.py` Make sure noise is the correct shape and type:
###Code
from cs231n.gan_tf import sample_noise
def test_sample_noise():
batch_size = 3
dim = 4
z = sample_noise(batch_size, dim)
# Check z has the correct shape
assert z.get_shape().as_list() == [batch_size, dim]
# Make sure z is a Tensor and not a numpy array
assert isinstance(z, tf.Tensor)
# Check that we get different noise for different evaluations
z1 = sample_noise(batch_size, dim)
z2 = sample_noise(batch_size, dim)
assert not np.array_equal(z1, z2)
# Check that we get the correct range
assert np.all(z1 >= -1.0) and np.all(z1 <= 1.0)
print("All tests passed!")
test_sample_noise()
###Output
_____no_output_____
###Markdown
DiscriminatorOur first step is to build a discriminator. **Hint:** You should use the layers in `tf.keras.layers` to build the model.All fully connected layers should include bias terms. For initialization, just use the default initializer used by the `tf.keras.layers` functions.Architecture: * Fully connected layer with input size 784 and output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 1 The output of the discriminator should thus have shape `[batch_size, 1]`, and contain real numbers corresponding to the scores that each of the `batch_size` inputs is a real image.Implement `discriminator()` in `cs231n/gan_tf.py` Test to make sure the number of parameters in the discriminator is correct:
###Code
from cs231n.gan_tf import discriminator
def test_discriminator(true_count=267009, discriminator=discriminator):
model = discriminator()
cur_count = count_params(model)
if cur_count != true_count:
        print('Incorrect number of parameters in discriminator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in discriminator.')
test_discriminator()
###Output
_____no_output_____
###Markdown
GeneratorNow to build a generator. You should use the layers in `tf.keras.layers` to construct the model. All fully connected layers should include bias terms. Note that you can use the tf.nn module to access activation functions. Once again, use the default initializers for parameters.Architecture: * Fully connected layer with input size tf.shape(z)[1] (the number of noise dimensions) and output size 1024 * `ReLU` * Fully connected layer with output size 1024 * `ReLU` * Fully connected layer with output size 784 * `TanH` (To restrict every element of the output to be in the range [-1,1]) Implement `generator()` in `cs231n/gan_tf.py` Test to make sure the number of parameters in the generator is correct:
###Code
from cs231n.gan_tf import generator
def test_generator(true_count=1858320, generator=generator):
model = generator(4)
cur_count = count_params(model)
if cur_count != true_count:
        print('Incorrect number of parameters in generator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in generator.')
test_generator()
###Output
_____no_output_____
###Markdown
GAN LossCompute the generator and discriminator loss. The generator loss is:$$\ell_G = -\mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$and the discriminator loss is:$$ \ell_D = -\mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] - \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$Note that these are negated from the equations presented earlier as we will be *minimizing* these losses.**HINTS**: Use [tf.ones](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/ones) and [tf.zeros](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/zeros) to generate labels for your discriminator. Use [tf.keras.losses.BinaryCrossentropy](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/losses/BinaryCrossentropy) to help compute your loss function.Implement `discriminator_loss, generator_loss` in `cs231n/gan_tf.py` Test your GAN loss. Make sure both the generator and discriminator loss are correct. You should see errors less than 1e-8.
###Code
from cs231n.gan_tf import discriminator_loss
def test_discriminator_loss(logits_real, logits_fake, d_loss_true):
d_loss = discriminator_loss(tf.constant(logits_real),
tf.constant(logits_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
test_discriminator_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_true'])
from cs231n.gan_tf import generator_loss
def test_generator_loss(logits_fake, g_loss_true):
g_loss = generator_loss(tf.constant(logits_fake))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_generator_loss(answers['logits_fake'], answers['g_loss_true'])
###Output
_____no_output_____
###Markdown
Optimizing our lossMake an `Adam` optimizer with a 1e-3 learning rate and beta1=0.5 to minimize G_loss and D_loss separately. The trick of decreasing beta was shown to be effective in helping GANs converge in the [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498) paper. In fact, with our current hyperparameters, if you set beta1 to the Tensorflow default of 0.9, there's a good chance your discriminator loss will go to zero and the generator will fail to learn entirely. Indeed, this is a common failure mode in GANs; if your D(x) learns too fast (e.g. loss goes near zero), your G(z) is never able to learn. Often D(x) is trained with SGD with Momentum or RMSProp instead of Adam, but here we'll use Adam for both D(x) and G(z). Implement `get_solvers` in `cs231n/gan_tf.py`
###Code
from cs231n.gan_tf import get_solvers
###Output
_____no_output_____
###Markdown
Training a GAN!Well that wasn't so hard, was it? After the first epoch, you should see fuzzy outlines, clear shapes as you approach epoch 3, and decent shapes, about half of which will be sharp and clearly recognizable as we pass epoch 5. In our case, we'll simply train D(x) and G(z) with one batch each every iteration. However, papers often experiment with different schedules of training D(x) and G(z), sometimes doing one for more steps than the other, or even training each one until the loss gets "good enough" and then switching to training the other. **If you are a Colab user, it is recommended to change the Colab runtime to GPU.** Train your GAN! This should take about 10 minutes on a CPU, or about 2 minutes on GPU.
###Code
from cs231n.gan_tf import run_a_gan
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
images, final = run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss)
numIter = 0
for img in images:
print("Iter: {}".format(numIter))
show_images(img)
plt.show()
numIter += 20
print()
###Output
_____no_output_____
###Markdown
**Please tag the cell below on Gradescope while submitting.**
###Code
print('Vanilla GAN Final images')
show_images(final)
plt.show()
###Output
_____no_output_____
###Markdown
Least Squares GANWe'll now look at [Least Squares GAN](https://arxiv.org/abs/1611.04076), a newer, more stable alternative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement equation (9) in the paper, with the generator loss:$$\ell_G = \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[\left(D(G(z))-1\right)^2\right]$$and the discriminator loss:$$ \ell_D = \frac{1}{2}\mathbb{E}_{x \sim p_\text{data}}\left[\left(D(x)-1\right)^2\right] + \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[ \left(D(G(z))\right)^2\right]$$**HINTS**: Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. When plugging in for $D(x)$ and $D(G(z))$ use the direct output from the discriminator (`score_real` and `score_fake`).Implement `ls_discriminator_loss, ls_generator_loss` in `cs231n/gan_tf.py` Test your LSGAN loss. You should see errors less than 1e-8.
###Code
from cs231n.gan_tf import ls_discriminator_loss, ls_generator_loss
def test_lsgan_loss(score_real, score_fake, d_loss_true, g_loss_true):
d_loss = ls_discriminator_loss(tf.constant(score_real), tf.constant(score_fake))
g_loss = ls_generator_loss(tf.constant(score_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_lsgan_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_lsgan_true'], answers['g_loss_lsgan_true'])
###Output
_____no_output_____
###Markdown
Create new training steps so we instead minimize the LSGAN loss:
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
images, final = run_a_gan(D, G, D_solver, G_solver, ls_discriminator_loss, ls_generator_loss)
numIter = 0
for img in images:
print("Iter: {}".format(numIter))
show_images(img)
plt.show()
numIter += 20
print()
###Output
_____no_output_____
###Markdown
**Please tag the cell below on Gradescope while submitting.**
###Code
print('LSGAN Final images')
show_images(final)
plt.show()
###Output
_____no_output_____
###Markdown
Deep Convolutional GANsIn the first part of the notebook, we implemented an almost direct copy of the original GAN network from Ian Goodfellow. However, this network architecture allows no real spatial reasoning. It is unable to reason about things like "sharp edges" in general because it lacks any convolutional layers. Thus, in this section, we will implement some of the ideas from [DCGAN](https://arxiv.org/abs/1511.06434), where we use convolutional networks as our discriminators and generators. DiscriminatorWe will use a discriminator inspired by the TensorFlow MNIST classification [tutorial](https://www.tensorflow.org/get_started/mnist/pros), which is able to get above 99% accuracy on the MNIST dataset fairly quickly. *Be sure to check the dimensions of x and reshape when needed*; fully connected blocks expect [N,D] Tensors while conv2d blocks expect [N,H,W,C] Tensors. Please use `tf.keras.layers` to define the following architecture:Architecture:* Conv2D: 32 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Conv2D: 64 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Flatten* Fully Connected with output size 4 x 4 x 64* Leaky ReLU(alpha=0.01)* Fully Connected with output size 1Once again, please use biases for all convolutional and fully connected layers, and use the default parameter initializers. Note that a padding of 0 can be accomplished with the 'VALID' padding option.Implement `dc_discriminator` in `cs231n/gan_tf.py`
###Code
from cs231n.gan_tf import dc_discriminator
# model = dc_discriminator()
test_discriminator(1102721, dc_discriminator)
###Output
_____no_output_____
###Markdown
GeneratorFor the generator, we will copy the architecture exactly from the [InfoGAN paper](https://arxiv.org/pdf/1606.03657.pdf). See Appendix C.1 MNIST. Please use `tf.keras.layers` for your implementation. You might find the documentation for [tf.keras.layers.Conv2DTranspose](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Conv2DTranspose) useful. The architecture is as follows.Architecture:* Fully connected with output size 1024 * `ReLU`* BatchNorm* Fully connected with output size 7 x 7 x 128 * `ReLU`* BatchNorm* Resize into Image Tensor of size 7, 7, 128* Conv2D^T (transpose): 64 filters of 4x4, stride 2* `ReLU`* BatchNorm* Conv2d^T (transpose): 1 filter of 4x4, stride 2* `TanH`Once again, use biases for the fully connected and transpose convolutional layers. Please use the default initializers for your parameters. For padding, choose the 'same' option for transpose convolutions. For Batch Normalization, assume we are always in 'training' mode.Implement `dc_generator` in `cs231n/gan_tf.py`
###Code
from cs231n.gan_tf import dc_generator
test_generator(6595521, generator=dc_generator)
###Output
_____no_output_____
###Markdown
We have to recreate our network since we've changed our functions. Train and evaluate a DCGANThis is the one part of A3 that significantly benefits from using a GPU. It takes about 3 minutes on a GPU for the requested five epochs, or about 50 minutes on a dual-core laptop CPU (feel free to use 3 epochs if you do it on CPU).
###Code
# Make the discriminator
D = dc_discriminator()
# Make the generator
G = dc_generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
images, final = run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss, num_epochs=5)
numIter = 0
for img in images:
print("Iter: {}".format(numIter))
show_images(img)
plt.show()
numIter += 20
print()
###Output
_____no_output_____
###Markdown
**Please tag the cell below on Gradescope while submitting.**
###Code
print('DCGAN Final images')
show_images(final)
plt.show()
###Output
_____no_output_____
###Markdown
Generative Adversarial Networks (GANs)So far in CS231N, all the applications of neural networks that we have explored have been **discriminative models** that take an input and are trained to produce a labeled output. This has ranged from straightforward classification of image categories to sentence generation (which was still phrased as a classification problem, our labels were in vocabulary space and we’d learned a recurrence to capture multi-word labels). In this notebook, we will expand our repertoire and build **generative models** using neural networks. Specifically, we will learn how to build models which generate novel images that resemble a set of training images. What is a GAN?In 2014, [Goodfellow et al.](https://arxiv.org/abs/1406.2661) presented a method for training generative models called Generative Adversarial Networks (GANs for short). In a GAN, we build two different neural networks. Our first network is a traditional classification network, called the **discriminator**. We will train the discriminator to take images, and classify them as being real (belonging to the training set) or fake (not present in the training set). Our other network, called the **generator**, will take random noise as input and transform it using a neural network to produce images. The goal of the generator is to fool the discriminator into thinking the images it produced are real.We can think of this back and forth process of the generator ($G$) trying to fool the discriminator ($D$), and the discriminator trying to correctly classify real vs. fake as a minimax game:$$\underset{G}{\text{minimize}}\; \underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$where $x \sim p_\text{data}$ are samples from the input data, $z \sim p(z)$ are the random noise samples, $G(z)$ are the generated images using the neural network generator $G$, and $D$ is the output of the discriminator, specifying the probability of an input being real. In [Goodfellow et al.](https://arxiv.org/abs/1406.2661), they analyze this minimax game and show how it relates to minimizing the Jensen-Shannon divergence between the training data distribution and the generated samples from $G$.To optimize this minimax game, we will alternate between taking gradient *descent* steps on the objective for $G$, and gradient *ascent* steps on the objective for $D$:1. update the **generator** ($G$) to minimize the probability of the __discriminator making the correct choice__. 2. update the **discriminator** ($D$) to maximize the probability of the __discriminator making the correct choice__.While these updates are useful for analysis, they do not perform well in practice. Instead, we will use a different objective when we update the generator: maximize the probability of the **discriminator making the incorrect choice**. This small change helps to alleviate problems with the generator gradient vanishing when the discriminator is confident. This is the standard update used in most GAN papers, and was used in the original paper from [Goodfellow et al.](https://arxiv.org/abs/1406.2661). In this assignment, we will alternate the following updates:1. Update the generator ($G$) to maximize the probability of the discriminator making the incorrect choice on generated data:$$\underset{G}{\text{maximize}}\; \mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$2. 
Update the discriminator ($D$) to maximize the probability of the discriminator making the correct choice on real and generated data:$$\underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$ What else is there?Since 2014, GANs have exploded into a huge research area, with massive [workshops](https://sites.google.com/site/nips2016adversarial/), and [hundreds of new papers](https://github.com/hindupuravinash/the-gan-zoo). Compared to other approaches for generative models, they often produce the highest quality samples but are some of the most difficult and finicky models to train (see [this github repo](https://github.com/soumith/ganhacks) that contains a set of 17 hacks that are useful for getting models working). Improving the stability and robustness of GAN training is an open research question, with new papers coming out every day! For a more recent tutorial on GANs, see [here](https://arxiv.org/abs/1701.00160). There is also some even more recent exciting work that changes the objective function to Wasserstein distance and yields much more stable results across model architectures: [WGAN](https://arxiv.org/abs/1701.07875), [WGAN-GP](https://arxiv.org/abs/1704.00028).GANs are not the only way to train a generative model! For other approaches to generative modeling check out the [deep generative model chapter](http://www.deeplearningbook.org/contents/generative_models.html) of the Deep Learning [book](http://www.deeplearningbook.org). Another popular way of training neural networks as generative models is Variational Autoencoders (co-discovered [here](https://arxiv.org/abs/1312.6114) and [here](https://arxiv.org/abs/1401.4082)). Variational autoencoders combine neural networks with variational inference to train deep generative models. These models tend to be far more stable and easier to train but currently don't produce samples that are as pretty as GANs.Example pictures of what you should expect (yours might look slightly different): Setup
###Code
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# A bunch of utility functions
def show_images(images):
images = np.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(np.ceil(np.sqrt(images.shape[0])))
sqrtimg = int(np.ceil(np.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(img.reshape([sqrtimg,sqrtimg]))
return
def preprocess_img(x):
return 2 * x - 1.0
def deprocess_img(x):
return (x + 1.0) / 2.0
def rel_error(x,y):
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def count_params(model):
"""Count the number of parameters in the current TensorFlow graph """
param_count = np.sum([np.prod(p.shape) for p in model.weights])
return param_count
answers = np.load('gan-checks-tf.npz')
NOISE_DIM = 96
###Output
_____no_output_____
###Markdown
Dataset GANs are notoriously finicky with hyperparameters, and also require many training epochs. In order to make this assignment approachable without a GPU, we will be working on the MNIST dataset, which is 60,000 training and 10,000 test images. Each picture contains a centered image of a white digit on a black background (0 through 9). This was one of the first datasets used to train convolutional neural networks and it is fairly easy -- a standard CNN model can easily exceed 99% accuracy. **Heads-up**: Our MNIST wrapper returns images as vectors. That is, they're size (batch, 784). If you want to treat them as images, you have to reshape them to (batch,28,28) or (batch,28,28,1). They are also type np.float32 and bounded [0,1].
###Code
class MNIST(object):
def __init__(self, batch_size, shuffle=False):
"""
Construct an iterator object over the MNIST data
Inputs:
- batch_size: Integer giving number of elements per minibatch
- shuffle: (optional) Boolean, whether to shuffle the data on each epoch
"""
train, _ = tf.keras.datasets.mnist.load_data()
X, y = train
X = X.astype(np.float32)/255
X = X.reshape((X.shape[0], -1))
self.X, self.y = X, y
self.batch_size, self.shuffle = batch_size, shuffle
def __iter__(self):
N, B = self.X.shape[0], self.batch_size
idxs = np.arange(N)
if self.shuffle:
np.random.shuffle(idxs)
        # Index with the (possibly shuffled) idxs so that shuffle=True actually
        # changes the batch order; plain slices would ignore the permutation.
        return iter((self.X[idxs[i:i+B]], self.y[idxs[i:i+B]]) for i in range(0, N, B))
# show a batch
mnist = MNIST(batch_size=16)
show_images(mnist.X[:16])
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 1s 0us/step
###Markdown
LeakyReLUIn the cell below, you should implement a LeakyReLU. See the [class notes](http://cs231n.github.io/neural-networks-1/) (where alpha is a small number) or equation (3) in [this paper](http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). LeakyReLUs keep ReLU units from dying and are often used in GAN methods (as are maxout units; however, those increase model size and are therefore not used in this notebook).HINT: You should be able to use `tf.maximum`
###Code
def leaky_relu(x, alpha=0.01):
"""Compute the leaky ReLU activation function.
Inputs:
- x: TensorFlow Tensor with arbitrary shape
- alpha: leak parameter for leaky ReLU
Returns:
TensorFlow Tensor with the same shape as x
"""
# TODO: implement leaky ReLU
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###Output
_____no_output_____
###Markdown
Test your leaky ReLU implementation. You should get errors < 1e-10
###Code
def test_leaky_relu(x, y_true):
y = leaky_relu(tf.constant(x))
print('Maximum error: %g'%rel_error(y_true, y))
test_leaky_relu(answers['lrelu_x'], answers['lrelu_y'])
###Output
_____no_output_____
###Markdown
Random NoiseGenerate a TensorFlow `Tensor` containing uniform noise from -1 to 1 with shape `[batch_size, dim]`.
###Code
def sample_noise(batch_size, dim):
"""Generate random uniform noise from -1 to 1.
Inputs:
- batch_size: integer giving the batch size of noise to generate
- dim: integer giving the dimension of the noise to generate
Returns:
TensorFlow Tensor containing uniform noise in [-1, 1] with shape [batch_size, dim]
"""
# TODO: sample and return noise
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###Output
_____no_output_____
###Markdown
Make sure noise is the correct shape and type:
###Code
def test_sample_noise():
batch_size = 3
dim = 4
z = sample_noise(batch_size, dim)
# Check z has the correct shape
assert z.get_shape().as_list() == [batch_size, dim]
# Make sure z is a Tensor and not a numpy array
assert isinstance(z, tf.Tensor)
# Check that we get different noise for different evaluations
z1 = sample_noise(batch_size, dim)
z2 = sample_noise(batch_size, dim)
assert not np.array_equal(z1, z2)
# Check that we get the correct range
assert np.all(z1 >= -1.0) and np.all(z1 <= 1.0)
print("All tests passed!")
test_sample_noise()
###Output
_____no_output_____
###Markdown
DiscriminatorOur first step is to build a discriminator. **Hint:** You should use the layers in `tf.keras.layers` to build the model.All fully connected layers should include bias terms. For initialization, just use the default initializer used by the `tf.keras.layers` functions.Architecture: * Fully connected layer with input size 784 and output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 1 The output of the discriminator should thus have shape `[batch_size, 1]`, and contain real numbers corresponding to the scores that each of the `batch_size` inputs is a real image.
###Code
def discriminator():
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
###Output
_____no_output_____
###Markdown
Test to make sure the number of parameters in the discriminator is correct:
###Code
def test_discriminator(true_count=267009):
model = discriminator()
cur_count = count_params(model)
if cur_count != true_count:
        print('Incorrect number of parameters in discriminator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in discriminator.')
test_discriminator()
###Output
_____no_output_____
###Markdown
GeneratorNow to build a generator. You should use the layers in `tf.keras.layers` to construct the model. All fully connected layers should include bias terms. Note that you can use the tf.nn module to access activation functions. Once again, use the default initializers for parameters.Architecture: * Fully connected layer with input size tf.shape(z)[1] (the number of noise dimensions) and output size 1024 * `ReLU` * Fully connected layer with output size 1024 * `ReLU` * Fully connected layer with output size 784 * `TanH` (To restrict every element of the output to be in the range [-1,1])
###Code
def generator(noise_dim=NOISE_DIM):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
###Output
_____no_output_____
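###Markdown
Again for reference, a sketch of one stack that matches the architecture list above (`generator_sketch` is an illustrative name, kept separate from the template):
###Code
def generator_sketch(noise_dim=NOISE_DIM):
    # Two ReLU hidden layers, then a tanh output restricted to [-1, 1].
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(1024, input_shape=(noise_dim,), activation='relu'),
        tf.keras.layers.Dense(1024, activation='relu'),
        tf.keras.layers.Dense(784, activation='tanh'),
    ])
###Output
_____no_output_____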
###Markdown
Test to make sure the number of parameters in the generator is correct:
###Code
def test_generator(true_count=1858320):
model = generator(4)
cur_count = count_params(model)
if cur_count != true_count:
print('Incorrect number of parameters in generator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in generator.')
test_generator()
###Output
_____no_output_____
###Markdown
GAN LossCompute the generator and discriminator loss. The generator loss is:$$\ell_G = -\mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$and the discriminator loss is:$$ \ell_D = -\mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] - \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$Note that these are negated from the equations presented earlier as we will be *minimizing* these losses.**HINTS**: Use [tf.ones](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/ones) and [tf.zeros](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/zeros) to generate labels for your discriminator. Use [tf.keras.losses.BinaryCrossentropy](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/losses/BinaryCrossentropy) to help compute your loss function.
###Code
def discriminator_loss(logits_real, logits_fake):
"""
Computes the discriminator loss described above.
Inputs:
- logits_real: Tensor of shape (N, 1) giving scores for the real data.
- logits_fake: Tensor of shape (N, 1) giving scores for the fake data.
Returns:
- loss: Tensor containing (scalar) the loss for the discriminator.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
def generator_loss(logits_fake):
"""
Computes the generator loss described above.
Inputs:
- logits_fake: Tensor of shape (N, 1) giving scores for the fake data.
Returns:
- loss: Tensor containing the (scalar) loss for the generator.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
###Output
_____no_output_____
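###Markdown
One possible reading of the two formulas in code, as a hedged sketch (the `_sketch` names are illustrative; `tf.keras.losses.BinaryCrossentropy` with `from_logits=True` is the helper the hint points at, and it averages over the minibatch):
###Code
# BinaryCrossentropy with from_logits=True applies the sigmoid internally
# and averages the per-example losses over the minibatch.
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss_sketch(logits_real, logits_fake):
    # Real images are labeled 1, fake images are labeled 0.
    loss_real = bce(tf.ones_like(logits_real), logits_real)
    loss_fake = bce(tf.zeros_like(logits_fake), logits_fake)
    return loss_real + loss_fake

def generator_loss_sketch(logits_fake):
    # The generator wants the discriminator to label its fakes as real (1).
    return bce(tf.ones_like(logits_fake), logits_fake)
###Output
_____no_output_____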
###Markdown
Test your GAN loss. Make sure both the generator and discriminator loss are correct. You should see errors less than 1e-8.
###Code
def test_discriminator_loss(logits_real, logits_fake, d_loss_true):
d_loss = discriminator_loss(tf.constant(logits_real),
tf.constant(logits_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
test_discriminator_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_true'])
def test_generator_loss(logits_fake, g_loss_true):
g_loss = generator_loss(tf.constant(logits_fake))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_generator_loss(answers['logits_fake'], answers['g_loss_true'])
###Output
_____no_output_____
###Markdown
Optimizing our lossMake an `Adam` optimizer with a 1e-3 learning rate, beta1=0.5 to minimize G_loss and D_loss separately. The trick of decreasing beta was shown to be effective in helping GANs converge in the [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498) paper. In fact, with our current hyperparameters, if you set beta1 to the Tensorflow default of 0.9, there's a good chance your discriminator loss will go to zero and the generator will fail to learn entirely. This is a common failure mode in GANs; if your D(x) learns too fast (e.g. loss goes near zero), your G(z) is never able to learn. Often D(x) is trained with SGD with Momentum or RMSProp instead of Adam, but here we'll use Adam for both D(x) and G(z).
###Code
# TODO: create an AdamOptimizer for D_solver and G_solver
def get_solvers(learning_rate=1e-3, beta1=0.5):
"""Create solvers for GAN training.
Inputs:
- learning_rate: learning rate to use for both solvers
- beta1: beta1 parameter for both solvers (first moment decay)
Returns:
- D_solver: instance of tf.optimizers.Adam with correct learning_rate and beta1
- G_solver: instance of tf.optimizers.Adam with correct learning_rate and beta1
"""
D_solver = None
G_solver = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return D_solver, G_solver
###Output
_____no_output_____
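###Markdown
A sketch of what the solver factory could look like, assuming the TF 2.x optimizer API (`tf.optimizers.Adam` with the `beta_1` keyword; `get_solvers_sketch` is an illustrative name):
###Code
def get_solvers_sketch(learning_rate=1e-3, beta1=0.5):
    # Two independent Adam optimizers, one per network, with beta_1 lowered
    # from the default 0.9 to 0.5 as discussed above.
    D_solver = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta1)
    G_solver = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta1)
    return D_solver, G_solver
###Output
_____no_output_____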
###Markdown
Training a GAN!Well that wasn't so hard, was it? After the first epoch, you should see fuzzy outlines, clear shapes as you approach epoch 3, and decent shapes, about half of which will be sharp and clearly recognizable as we pass epoch 5. In our case, we'll simply train D(x) and G(z) with one batch each every iteration. However, papers often experiment with different schedules of training D(x) and G(z), sometimes doing one for more steps than the other, or even training each one until the loss gets "good enough" and then switching to training the other.
###Code
# a giant helper function
def run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss,\
show_every=20, print_every=20, batch_size=128, num_epochs=10, noise_size=96):
"""Train a GAN for a certain number of epochs.
Inputs:
- D: Discriminator model
- G: Generator model
- D_solver: an Optimizer for Discriminator
- G_solver: an Optimizer for Generator
- generator_loss: Generator loss
- discriminator_loss: Discriminator loss
Returns:
Nothing
"""
mnist = MNIST(batch_size=batch_size, shuffle=True)
iter_count = 0
for epoch in range(num_epochs):
for (x, _) in mnist:
with tf.GradientTape() as tape:
real_data = x
logits_real = D(preprocess_img(real_data))
g_fake_seed = sample_noise(batch_size, noise_size)
fake_images = G(g_fake_seed)
logits_fake = D(tf.reshape(fake_images, [batch_size, 784]))
d_total_error = discriminator_loss(logits_real, logits_fake)
d_gradients = tape.gradient(d_total_error, D.trainable_variables)
D_solver.apply_gradients(zip(d_gradients, D.trainable_variables))
with tf.GradientTape() as tape:
g_fake_seed = sample_noise(batch_size, noise_size)
fake_images = G(g_fake_seed)
gen_logits_fake = D(tf.reshape(fake_images, [batch_size, 784]))
g_error = generator_loss(gen_logits_fake)
g_gradients = tape.gradient(g_error, G.trainable_variables)
G_solver.apply_gradients(zip(g_gradients, G.trainable_variables))
if (iter_count % show_every == 0):
print('Epoch: {}, Iter: {}, D: {:.4}, G:{:.4}'.format(epoch, iter_count,d_total_error,g_error))
imgs_numpy = fake_images.cpu().numpy()
show_images(imgs_numpy[0:16])
plt.show()
iter_count += 1
# random noise fed into our generator
z = sample_noise(batch_size, noise_size)
# generated images
G_sample = G(z)
print('Final images')
show_images(G_sample[:16])
plt.show()
###Output
_____no_output_____
###Markdown
Train your GAN! This should take about 10 minutes on a CPU, or about 2 minutes on GPU.
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss)
###Output
_____no_output_____
###Markdown
Least Squares GANWe'll now look at [Least Squares GAN](https://arxiv.org/abs/1611.04076), a newer, more stable alternative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement equation (9) in the paper, with the generator loss:$$\ell_G = \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[\left(D(G(z))-1\right)^2\right]$$and the discriminator loss:$$ \ell_D = \frac{1}{2}\mathbb{E}_{x \sim p_\text{data}}\left[\left(D(x)-1\right)^2\right] + \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[ \left(D(G(z))\right)^2\right]$$**HINTS**: Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. When plugging in for $D(x)$ and $D(G(z))$ use the direct output from the discriminator (`score_real` and `score_fake`).
###Code
def ls_discriminator_loss(scores_real, scores_fake):
"""
Compute the Least-Squares GAN loss for the discriminator.
Inputs:
- scores_real: Tensor of shape (N, 1) giving scores for the real data.
- scores_fake: Tensor of shape (N, 1) giving scores for the fake data.
Outputs:
- loss: A Tensor containing the loss.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
def ls_generator_loss(scores_fake):
"""
Computes the Least-Squares GAN loss for the generator.
Inputs:
- scores_fake: Tensor of shape (N, 1) giving scores for the fake data.
Outputs:
- loss: A Tensor containing the loss.
"""
loss = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
###Output
_____no_output_____
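###Markdown
A hedged sketch of the least-squares losses, with the expectations replaced by batch means as the hint suggests (the `_sketch` names are illustrative):
###Code
def ls_discriminator_loss_sketch(scores_real, scores_fake):
    # (1/2) E[(D(x) - 1)^2] + (1/2) E[D(G(z))^2], with E taken as the batch mean.
    loss_real = 0.5 * tf.reduce_mean((scores_real - 1.0) ** 2)
    loss_fake = 0.5 * tf.reduce_mean(scores_fake ** 2)
    return loss_real + loss_fake

def ls_generator_loss_sketch(scores_fake):
    # (1/2) E[(D(G(z)) - 1)^2]
    return 0.5 * tf.reduce_mean((scores_fake - 1.0) ** 2)
###Output
_____no_output_____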
###Markdown
Test your LSGAN loss. You should see errors less than 1e-8.
###Code
def test_lsgan_loss(score_real, score_fake, d_loss_true, g_loss_true):
d_loss = ls_discriminator_loss(tf.constant(score_real), tf.constant(score_fake))
g_loss = ls_generator_loss(tf.constant(score_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_lsgan_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_lsgan_true'], answers['g_loss_lsgan_true'])
###Output
_____no_output_____
###Markdown
Create new training steps so we instead minimize the LSGAN loss:
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, ls_discriminator_loss, ls_generator_loss)
###Output
_____no_output_____
###Markdown
Deep Convolutional GANsIn the first part of the notebook, we implemented an almost direct copy of the original GAN network from Ian Goodfellow. However, this network architecture allows no real spatial reasoning. It is unable to reason about things like "sharp edges" in general because it lacks any convolutional layers. Thus, in this section, we will implement some of the ideas from [DCGAN](https://arxiv.org/abs/1511.06434), where we use convolutional networks as our discriminators and generators. DiscriminatorWe will use a discriminator inspired by the TensorFlow MNIST classification [tutorial](https://www.tensorflow.org/get_started/mnist/pros), which is able to get above 99% accuracy on the MNIST dataset fairly quickly. *Be sure to check the dimensions of x and reshape when needed*, fully connected blocks expect [N,D] Tensors while conv2d blocks expect [N,H,W,C] Tensors. Please use `tf.keras.layers` to define the following architecture:Architecture:* Conv2D: 32 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Conv2D: 64 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Flatten* Fully Connected with output size 4 x 4 x 64* Leaky ReLU(alpha=0.01)* Fully Connected with output size 1Once again, please use biases for all convolutional and fully connected layers, and use the default parameter initializers. Note that a padding of 0 can be accomplished with the 'VALID' padding option.
###Code
def discriminator():
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
model = discriminator()
test_discriminator(1102721)
###Output
_____no_output_____
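###Markdown
A sketch of the convolutional stack described above, assuming flattened (batch, 784) inputs that are reshaped to images first (`dc_discriminator_sketch` is an illustrative name):
###Code
def dc_discriminator_sketch():
    return tf.keras.models.Sequential([
        # conv blocks expect [N, H, W, C], so un-flatten the input first
        tf.keras.layers.Reshape((28, 28, 1), input_shape=(784,)),
        tf.keras.layers.Conv2D(32, 5, strides=1, padding='valid'),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Conv2D(64, 5, strides=1, padding='valid'),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(4 * 4 * 64),
        tf.keras.layers.LeakyReLU(alpha=0.01),
        tf.keras.layers.Dense(1),
    ])
###Output
_____no_output_____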
###Markdown
GeneratorFor the generator, we will copy the architecture exactly from the [InfoGAN paper](https://arxiv.org/pdf/1606.03657.pdf). See Appendix C.1 MNIST. Please use `tf.keras.layers` for your implementation. You might find the documentation for [tf.keras.layers.Conv2DTranspose](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Conv2DTranspose) useful. The architecture is as follows.Architecture:* Fully connected with output size 1024 * `ReLU`* BatchNorm* Fully connected with output size 7 x 7 x 128 * `ReLU`* BatchNorm* Resize into Image Tensor of size 7, 7, 128* Conv2D^T (transpose): 64 filters of 4x4, stride 2* `ReLU`* BatchNorm* Conv2d^T (transpose): 1 filter of 4x4, stride 2* `TanH`Once again, use biases for the fully connected and transpose convolutional layers. Please use the default initializers for your parameters. For padding, choose the 'same' option for transpose convolutions. For Batch Normalization, assume we are always in 'training' mode.
###Code
def generator(noise_dim=NOISE_DIM):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
model = tf.keras.models.Sequential()
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return model
test_generator(6595521)
###Output
_____no_output_____
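###Markdown
A hedged sketch of the InfoGAN-style generator described above (`dc_generator_sketch` is an illustrative name; with 'same' padding, each stride-2 transpose convolution doubles H and W):
###Code
def dc_generator_sketch(noise_dim=NOISE_DIM):
    return tf.keras.models.Sequential([
        tf.keras.layers.Dense(1024, input_shape=(noise_dim,), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Dense(7 * 7 * 128, activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Reshape((7, 7, 128)),  # 7 x 7 x 128 image tensor
        tf.keras.layers.Conv2DTranspose(64, 4, strides=2, padding='same', activation='relu'),  # 14 x 14 x 64
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2DTranspose(1, 4, strides=2, padding='same', activation='tanh'),  # 28 x 28 x 1
        tf.keras.layers.Reshape((784,)),  # flatten back to vectors
    ])
###Output
_____no_output_____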
###Markdown
We have to recreate our network since we've changed our functions. Train and evaluate a DCGANThis is the one part of A3 that significantly benefits from using a GPU. It takes 3 minutes on a GPU for the requested five epochs. Or about 50 minutes on a dual core laptop on CPU (feel free to use 3 epochs if you do it on CPU).
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss, num_epochs=5)
###Output
_____no_output_____
###Markdown
Generative Adversarial Networks (GANs)So far in CS231N, all the applications of neural networks that we have explored have been **discriminative models** that take an input and are trained to produce a labeled output. This has ranged from straightforward classification of image categories to sentence generation (which was still phrased as a classification problem; our labels were in vocabulary space and we learned a recurrence to capture multi-word labels). In this notebook, we will expand our repertoire and build **generative models** using neural networks. Specifically, we will learn how to build models which generate novel images that resemble a set of training images. What is a GAN?In 2014, [Goodfellow et al.](https://arxiv.org/abs/1406.2661) presented a method for training generative models called **`Generative Adversarial Networks`** (GANs for short). In a GAN, we build two different neural networks. 1. Our first network is a traditional classification network, called the **`discriminator`**. We will train the discriminator to take images, and classify them as being **real (belonging to the training set)** or **fake (not present in the training set)**. 2. Our other network, called the **`generator`**, will take random noise as input and transform it using a neural network to produce images. The goal of the generator is to fool the discriminator into thinking the images it produced are real.We can think of this back and forth process of the generator ($G$) trying to fool the discriminator ($D$), and the discriminator trying to correctly classify real vs. fake as a minimax game:$$\underset{G}{\text{minimize}}\; \underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$where $x \sim p_\text{data}$ are samples from the input data, $z \sim p(z)$ are the random noise samples, $G(z)$ are the generated images using the neural network generator $G$, and $D$ is the output of the discriminator, specifying the probability of an input being real. In [Goodfellow et al.](https://arxiv.org/abs/1406.2661), **they analyze this minimax game and show how it relates to minimizing the Jensen-Shannon divergence between the training data distribution and the generated samples from $G$**.To optimize this minimax game, we will alternate between taking `gradient descent` steps on the objective for $G$, and `gradient ascent` steps on the objective for $D$:1. update the **generator** ($G$) to minimize the probability of the __discriminator making the correct choice__. 2. update the **discriminator** ($D$) to maximize the probability of the __discriminator making the correct choice__.While these updates are useful for analysis, they do not perform well in practice. Instead, we will use a different objective when we update the generator: maximize the probability of the **discriminator making the incorrect choice**. This small change helps to alleviate problems with the generator gradient vanishing when the discriminator is confident. This is the standard update used in most GAN papers, and was used in the original paper from [Goodfellow et al.](https://arxiv.org/abs/1406.2661). In this assignment, we will alternate the following updates:1. Update the generator ($G$) to maximize the probability of the discriminator making the incorrect choice on generated data:$$\underset{G}{\text{maximize}}\; \mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$2. 
Update the discriminator ($D$), to maximize the probability of the discriminator making the correct choice on real and generated data:$$\underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$ What else is there?Since 2014, GANs have exploded into a huge research area, with massive [workshops](https://sites.google.com/site/nips2016adversarial/), and [hundreds of new papers](https://github.com/hindupuravinash/the-gan-zoo). Compared to other approaches for generative models, they often produce the highest quality samples but are some of the most difficult and finicky models to train (see [this github repo](https://github.com/soumith/ganhacks) that contains a set of **17 hacks that are useful for getting models working**). Improving the stability and robustness of GAN training is an open research question, with new papers coming out every day! For a more recent tutorial on GANs, see [here](https://arxiv.org/abs/1701.00160). There is also some even more recent exciting work that **changes the objective function to Wasserstein distance and yields much more stable results across model architectures**: [WGAN](https://arxiv.org/abs/1701.07875), [WGAN-GP](https://arxiv.org/abs/1704.00028).GANs are not the only way to train a generative model! For other approaches to generative modeling check out the [deep generative model chapter](http://www.deeplearningbook.org/contents/generative_models.html) of the Deep Learning [book](http://www.deeplearningbook.org). Another popular way of training neural networks as generative models is **Variational Autoencoders** (co-discovered [here](https://arxiv.org/abs/1312.6114) and [here](https://arxiv.org/abs/1401.4082)). Variational autoencoders combine neural networks with variational inference to train deep generative models. These models tend to be far more stable and easier to train but currently don't produce samples that are as pretty as GANs.Example pictures of what you should expect (yours might look slightly different): Setup
###Code
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# A bunch of utility functions
def show_images(images):
images = np.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(np.ceil(np.sqrt(images.shape[0])))
sqrtimg = int(np.ceil(np.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(img.reshape([sqrtimg,sqrtimg]))
return
def preprocess_img(x):
return 2 * x - 1.0
def deprocess_img(x):
return (x + 1.0) / 2.0
def rel_error(x,y):
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def count_params(model):
"""Count the number of parameters in the current TensorFlow graph """
param_count = np.sum([np.prod(p.shape) for p in model.weights])
return param_count
answers = np.load('gan-checks-tf.npz')
NOISE_DIM = 96
###Output
_____no_output_____
###Markdown
Dataset GANs are notoriously finicky with hyperparameters, and also require many training epochs. In order to make this assignment approachable without a GPU, we will be working on the MNIST dataset, which has 60,000 training and 10,000 test images. Each picture contains a centered white digit on a black background (0 through 9). This was one of the first datasets used to train convolutional neural networks and it is fairly easy -- a standard CNN model can easily exceed 99% accuracy. **Heads-up**: Our MNIST wrapper returns images as vectors. That is, they're size (batch, 784). If you want to treat them as images, you'll need to reshape them to (batch,28,28) or (batch,28,28,1). They are also type np.float32 and bounded [0,1].
###Code
class MNIST(object):
def __init__(self, batch_size, shuffle=False):
"""
Construct an iterator object over the MNIST data
Inputs:
- batch_size: Integer giving number of elements per minibatch
- shuffle: (optional) Boolean, whether to shuffle the data on each epoch
"""
train, _ = tf.keras.datasets.mnist.load_data()
X, y = train
X = X.astype(np.float32)/255
X = X.reshape((X.shape[0], -1))
self.X, self.y = X, y
self.batch_size, self.shuffle = batch_size, shuffle
def __iter__(self):
N, B = self.X.shape[0], self.batch_size
idxs = np.arange(N)
if self.shuffle:
np.random.shuffle(idxs)
return iter((self.X[i:i+B], self.y[i:i+B]) for i in range(0, N, B))
# explore the data
train, _ = tf.keras.datasets.mnist.load_data()
X, y = train
X.shape
# show a batch
mnist = MNIST(batch_size=16)
show_images(mnist.X[:16])
###Output
_____no_output_____
###Markdown
LeakyReLUIn the cell below, you should implement a `LeakyReLU`. See the [class notes](http://cs231n.github.io/neural-networks-1/) (where alpha is small number) or equation (3) in [this paper](http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). LeakyReLUs keep ReLU units from dying and are often used in GAN methods (as are maxout units, however those increase model size and therefore are not used in this notebook).HINT: You should be able to use `tf.maximum`
###Code
def leaky_relu(x, alpha=0.01):
"""Compute the leaky ReLU activation function.
Inputs:
- x: TensorFlow Tensor with arbitrary shape
- alpha: leak parameter for leaky ReLU
Returns:
TensorFlow Tensor with the same shape as x
"""
# TODO: implement leaky ReLU
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
x = tf.math.maximum(tf.math.multiply(x, alpha), x)
return x
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###Output
_____no_output_____
###Markdown
Test your leaky ReLU implementation. You should get errors < 1e-10
###Code
def test_leaky_relu(x, y_true):
y = leaky_relu(tf.constant(x))
print('Maximum error: %g'%rel_error(y_true, y))
test_leaky_relu(answers['lrelu_x'], answers['lrelu_y'])
###Output
Maximum error: 0
###Markdown
Random NoiseGenerate a TensorFlow `Tensor` containing uniform noise from -1 to 1 with shape `[batch_size, dim]`.
###Code
def sample_noise(batch_size, dim):
"""Generate random uniform noise from -1 to 1.
Inputs:
- batch_size: integer giving the batch size of noise to generate
- dim: integer giving the dimension of the noise to generate
Returns:
TensorFlow Tensor containing uniform noise in [-1, 1] with shape [batch_size, dim]
"""
# TODO: sample and return noise
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
z = tf.random.uniform((batch_size, dim), -1, 1)
return z
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
###Output
_____no_output_____
###Markdown
Make sure noise is the correct shape and type:
###Code
def test_sample_noise():
batch_size = 3
dim = 4
z = sample_noise(batch_size, dim)
# Check z has the correct shape
assert z.get_shape().as_list() == [batch_size, dim]
# Make sure z is a Tensor and not a numpy array
assert isinstance(z, tf.Tensor)
# Check that we get different noise for different evaluations
z1 = sample_noise(batch_size, dim)
z2 = sample_noise(batch_size, dim)
assert not np.array_equal(z1, z2)
# Check that we get the correct range
assert np.all(z1 >= -1.0) and np.all(z1 <= 1.0)
print("All tests passed!")
test_sample_noise()
###Output
All tests passed!
###Markdown
DiscriminatorOur first step is to build a discriminator. **Hint:** You should use the layers in `tf.keras.layers` to build the model.All fully connected layers should include bias terms. For initialization, just use the default initializer used by the `tf.keras.layers` functions.Architecture: * Fully connected layer with input size 784 and output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 256 * LeakyReLU with alpha 0.01 * Fully connected layer with output size 1 The output of the discriminator should thus have shape `[batch_size, 1]`, and contain real numbers corresponding to the scores that each of the `batch_size` inputs is a real image.
###Code
def discriminator():
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# 1st layer
tf.keras.layers.Dense(256, input_shape=(784,)), tf.keras.layers.LeakyReLU(alpha=0.01),
# 2nd layer
tf.keras.layers.Dense(256), tf.keras.layers.LeakyReLU(alpha=0.01),
# 3rd layer
tf.keras.layers.Dense(1)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
###Output
_____no_output_____
###Markdown
Test to make sure the number of parameters in the discriminator is correct:
###Code
def test_discriminator(true_count=267009):
model = discriminator()
cur_count = count_params(model)
if cur_count != true_count:
print('Incorrect number of parameters in discriminator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in discriminator.')
test_discriminator()
###Output
Correct number of parameters in discriminator.
###Markdown
GeneratorNow to build a generator. You should use the layers in `tf.keras.layers` to construct the model. All fully connected layers should include bias terms. Note that you can use the `tf.nn` module to access activation functions. Once again, use the default initializers for parameters.Architecture: * Fully connected layer with input size tf.shape(z)[1] (the number of noise dimensions) and output size 1024 * `ReLU` * Fully connected layer with output size 1024 * `ReLU` * Fully connected layer with output size 784 * `TanH` (to restrict every element of the output to the range [-1,1]; the raw images are also rescaled to [-1, 1])
###Code
def generator(noise_dim=NOISE_DIM):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# 1st layer
tf.keras.layers.Dense(1024, input_shape=(noise_dim,), activation='relu'),
# 2nd layer
tf.keras.layers.Dense(1024, activation='relu'),
# 3rd layer
tf.keras.layers.Dense(784, activation='tanh')
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
###Output
_____no_output_____
###Markdown
Test to make sure the number of parameters in the generator is correct:
###Code
def test_generator(true_count=1858320):
model = generator(4)
cur_count = count_params(model)
if cur_count != true_count:
print('Incorrect number of parameters in generator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
else:
print('Correct number of parameters in generator.')
test_generator()
###Output
Correct number of parameters in generator.
###Markdown
GAN LossCompute the generator and discriminator loss. The generator loss is:$$\ell_G = -\mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$and the discriminator loss is:$$ \ell_D = -\mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] - \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$**Note:** the two terms are both binary cross-entropies1. $-\log D(x)$ is the cross-entropy of a real image whose real_label is $1$, so it is equal to $1*(-\log D(x)) + 0*(-\log(1-D(x)))=-\log D(x)$, and $\mathbb{E}_{x \sim p_\text{data}}$ means averaging across all observations in the mini-batch2. $-\log \left(1-D(G(z))\right)$ is the cross-entropy of a fake image whose fake_label is $0$, so it is equal to $0*(-\log D(G(z))) + 1*(-\log(1-D(G(z))))=-\log(1-D(G(z)))$, and $\mathbb{E}_{z \sim p(z)}$ means averaging across all observations in the mini-batch**Note that these are negated from the equations presented earlier as we will be *minimizing* these losses.****HINTS**: Use [tf.ones](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/ones) and [tf.zeros](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/zeros) to generate labels for your discriminator. Use [tf.losses.binary_crossentropy](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/losses/BinaryCrossentropy) to help compute your loss function.
###Code
def discriminator_loss(logits_real, logits_fake):
"""
Computes the discriminator loss described above.
Inputs:
- logits_real: Tensor of shape (N, 1) giving scores for the real data.
- logits_fake: Tensor of shape (N, 1) giving scores for the fake data.
Returns:
- loss: Tensor containing (scalar) the loss for the discriminator.
"""
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
N = logits_real.shape[0]
loss_real = tf.losses.binary_crossentropy(tf.ones([N, 1]), logits_real, from_logits=True)
loss_fake = tf.losses.binary_crossentropy(tf.zeros([N, 1]), logits_fake, from_logits=True)
loss = tf.math.reduce_mean(loss_real + loss_fake, axis=0)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
def generator_loss(logits_fake):
"""
Computes the generator loss described above.
Inputs:
- logits_fake: Tensor of shape (N, 1) giving scores for the fake data.
Returns:
- loss: Tensor containing the (scalar) loss for the generator.
"""
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
N = logits_fake.shape[0]
loss = tf.losses.binary_crossentropy(tf.ones([N, 1]), logits_fake, from_logits=True)
loss = tf.math.reduce_mean(loss, axis=0)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
###Output
_____no_output_____
###Markdown
Test your GAN loss. Make sure both the generator and discriminator loss are correct. You should see errors less than 1e-8.
###Code
def test_discriminator_loss(logits_real, logits_fake, d_loss_true):
d_loss = discriminator_loss(tf.constant(logits_real),
tf.constant(logits_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
test_discriminator_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_true'])
def test_generator_loss(logits_fake, g_loss_true):
g_loss = generator_loss(tf.constant(logits_fake))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_generator_loss(answers['logits_fake'], answers['g_loss_true'])
###Output
Maximum error in g_loss: 7.19722e-17
###Markdown
Optimizing our lossMake an `Adam` optimizer with a 1e-3 learning rate, beta1=0.5 to minimize G_loss and D_loss separately. The trick of **decreasing beta was shown to be effective in helping GANs converge** in the [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498) paper. In fact, with our current hyperparameters, if you set beta1 to the Tensorflow default of 0.9, there's a good chance your discriminator loss will go to zero and the generator will fail to learn entirely. This is a common failure mode in GANs; **if your D(x) learns too fast (e.g. loss goes near zero), your G(z) is never able to learn**. **Often D(x) is trained with `SGD with Momentum` or `RMSProp` instead of `Adam`, but here we'll use Adam for both D(x) and G(z)**.
###Code
# TODO: create an AdamOptimizer for D_solver and G_solver
def get_solvers(learning_rate=1e-3, beta1=0.5):
"""Create solvers for GAN training.
Inputs:
- learning_rate: learning rate to use for both solvers
- beta1: beta1 parameter for both solvers (first moment decay)
Returns:
- D_solver: instance of tf.optimizers.Adam with correct learning_rate and beta1
- G_solver: instance of tf.optimizers.Adam with correct learning_rate and beta1
"""
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
D_solver = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta1)
G_solver = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=beta1)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return D_solver, G_solver
###Output
_____no_output_____
###Markdown
Training a GAN!Well that wasn't so hard, was it? After the first epoch, you should see fuzzy outlines, clear shapes as you approach epoch 3, and decent shapes, about half of which will be sharp and clearly recognizable as we pass epoch 5. **In our case, we'll simply train D(x) and G(z) with one batch each every iteration. However, papers often experiment with different schedules of training D(x) and G(z), sometimes doing one for more steps than the other, or even training each one until the loss gets "good enough" and then switching to training the other**.
###Code
# a giant helper function
def run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss,
show_every=469, batch_size=128, num_epochs=10, noise_size=96):
"""Train a GAN for a certain number of epochs.
Inputs:
- D: Discriminator model
- G: Generator model
- D_solver: an Optimizer for Discriminator
- G_solver: an Optimizer for Generator
- generator_loss: Generator loss
- discriminator_loss: Discriminator loss
Returns:
Nothing
"""
mnist = MNIST(batch_size=batch_size, shuffle=True)
iter_count = 0
for epoch in range(num_epochs):
for (x, _) in mnist:
with tf.GradientTape() as tape:
real_data = x
i_batch_size = real_data.shape[0] # in case N is not divisible by batch_size
logits_real = D(preprocess_img(real_data))
g_fake_seed = sample_noise(i_batch_size, noise_size)
fake_images = G(g_fake_seed)
logits_fake = D(fake_images)
d_total_error = discriminator_loss(logits_real, logits_fake)
d_gradients = tape.gradient(d_total_error, D.trainable_variables)
D_solver.apply_gradients(zip(d_gradients, D.trainable_variables))
with tf.GradientTape() as tape:
g_fake_seed = sample_noise(i_batch_size, noise_size)
fake_images = G(g_fake_seed)
gen_logits_fake = D(fake_images)
g_error = generator_loss(gen_logits_fake)
g_gradients = tape.gradient(g_error, G.trainable_variables)
G_solver.apply_gradients(zip(g_gradients, G.trainable_variables))
if (iter_count % show_every == 0):
print('Epoch: %d, Iter: %d, D: %.4f, G: %.4f' % (epoch, iter_count, d_total_error, g_error))
imgs_numpy = fake_images.cpu().numpy()
show_images(imgs_numpy[0:16])
plt.show()
iter_count += 1
# random noise fed into our generator
z = sample_noise(batch_size, noise_size)
# generated images
G_sample = G(z)
print('Final images')
show_images(G_sample[:16])
plt.show()
###Output
_____no_output_____
###Markdown
Train your GAN! This should take about 10 minutes on a CPU, or about 2 minutes on GPU.
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss)
###Output
Epoch: 0 , Iter: 0, D: 1.304832.4, G: 0.708987.4
###Markdown
Least Squares GANWe'll now look at [Least Squares GAN](https://arxiv.org/abs/1611.04076), a newer, more stable alternative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement equation (9) in the paper, with the generator loss:$$\ell_G = \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[\left(D(G(z))-1\right)^2\right]$$and the discriminator loss:$$ \ell_D = \frac{1}{2}\mathbb{E}_{x \sim p_\text{data}}\left[\left(D(x)-1\right)^2\right] + \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[ \left(D(G(z))\right)^2\right]$$**HINTS**: Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. When plugging in for $D(x)$ and $D(G(z))$ use the direct output from the discriminator (`score_real` and `score_fake`).
###Code
def ls_discriminator_loss(scores_real, scores_fake):
"""
Compute the Least-Squares GAN loss for the discriminator.
Inputs:
- scores_real: Tensor of shape (N, 1) giving scores for the real data.
- scores_fake: Tensor of shape (N, 1) giving scores for the fake data.
Outputs:
- loss: A Tensor containing the loss.
"""
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
loss_real = 0.5 * tf.math.pow(scores_real - 1, 2)
loss_fake = 0.5 * tf.math.pow(scores_fake, 2)
loss = loss_real + loss_fake
loss = tf.math.reduce_mean(loss, axis=0)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
def ls_generator_loss(scores_fake):
"""
Computes the Least-Squares GAN loss for the generator.
Inputs:
- scores_fake: Tensor of shape (N, 1) giving scores for the fake data.
Outputs:
- loss: A Tensor containing the loss.
"""
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
loss = 0.5 * tf.math.pow(scores_fake - 1, 2)
loss = tf.math.reduce_mean(loss, axis=0)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return loss
###Output
_____no_output_____
###Markdown
Test your LSGAN loss. You should see errors less than 1e-8.
###Code
def test_lsgan_loss(score_real, score_fake, d_loss_true, g_loss_true):
d_loss = ls_discriminator_loss(tf.constant(score_real), tf.constant(score_fake))
g_loss = ls_generator_loss(tf.constant(score_fake))
print("Maximum error in d_loss: %g"%rel_error(d_loss_true, d_loss))
print("Maximum error in g_loss: %g"%rel_error(g_loss_true, g_loss))
test_lsgan_loss(answers['logits_real'], answers['logits_fake'],
answers['d_loss_lsgan_true'], answers['g_loss_lsgan_true'])
###Output
Maximum error in d_loss: 0
Maximum error in g_loss: 0
###Markdown
Create new training steps so we instead minimize the LSGAN loss:
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, ls_discriminator_loss, ls_generator_loss)
###Output
Epoch: 0 , Iter: 0, D: 1.890192.4, G: 0.376653.4
###Markdown
Deep Convolutional GANsIn the first part of the notebook, we implemented an almost direct copy of the original GAN network from Ian Goodfellow. However, this network architecture allows no real spatial reasoning. It is unable to reason about things like "sharp edges" in general because it lacks any convolutional layers. Thus, in this section, we will implement some of the ideas from [DCGAN](https://arxiv.org/abs/1511.06434), where we use convolutional networks as our discriminators and generators. DiscriminatorWe will use a discriminator inspired by the **TensorFlow MNIST classification** [tutorial](https://www.tensorflow.org/get_started/mnist/pros), which is able to get above 99% accuracy on the MNIST dataset fairly quickly. **Be sure to check the dimensions of x and reshape when needed**, fully connected blocks expect [N,D] Tensors while conv2d blocks expect [N,H,W,C] Tensors. Please use `tf.keras.layers` to define the following architecture:Architecture:* Conv2D: 32 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Conv2D: 64 Filters, 5x5, Stride 1, padding 0* Leaky ReLU(alpha=0.01)* Max Pool 2x2, Stride 2* Flatten* Fully Connected with output size 4 x 4 x 64* Leaky ReLU(alpha=0.01)* Fully Connected with output size 1Once again, please use biases for all convolutional and fully connected layers, and use the default parameter initializers. Note that a padding of 0 can be accomplished with the 'VALID' padding option.
###Code
def discriminator():
"""Compute discriminator score for a batch of input images.
Inputs:
- x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
Returns:
TensorFlow Tensor with shape [batch_size, 1], containing the score
for an image being real for each input image.
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
tf.keras.layers.Reshape((28, 28, 1), input_shape=(784,)),
tf.keras.layers.Conv2D(32, 5, strides=1, padding='valid'),
tf.keras.layers.LeakyReLU(alpha=0.01),
tf.keras.layers.MaxPool2D(2),
tf.keras.layers.Conv2D(64, 5, strides=1, padding='valid'),
tf.keras.layers.LeakyReLU(alpha=0.01),
tf.keras.layers.MaxPool2D(2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4*4*64),
tf.keras.layers.LeakyReLU(alpha=0.01),
tf.keras.layers.Dense(1)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
model = discriminator()
test_discriminator(1102721)
###Output
Correct number of parameters in discriminator.
###Markdown
GeneratorFor the generator, we will copy the architecture exactly from the [InfoGAN paper](https://arxiv.org/pdf/1606.03657.pdf). See Appendix C.1 MNIST. Please use `tf.keras.layers` for your implementation. You might find the documentation for [tf.keras.layers.Conv2DTranspose](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Conv2DTranspose) useful. The architecture is as follows.Architecture:* Fully connected with output size 1024 * `ReLU`* BatchNorm* Fully connected with output size 7 x 7 x 128 * `ReLU`* BatchNorm* Resize into Image Tensor of size 7, 7, 128* Conv2D^T (transpose): 64 filters of 4x4, stride 2* `ReLU`* BatchNorm* Conv2d^T (transpose): 1 filter of 4x4, stride 2* `TanH`Once again, **use biases for the fully connected and transpose convolutional layers**. Please use the default initializers for your parameters. **For padding, choose the `same` option for transpose convolutions**. For Batch Normalization, assume we are always in `training` mode.
###Code
def generator(noise_dim=NOISE_DIM):
"""Generate images from a random noise vector.
Inputs:
- z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
Returns:
TensorFlow Tensor of generated images, with shape [batch_size, 784].
"""
model = tf.keras.models.Sequential([
# TODO: implement architecture
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
tf.keras.layers.Dense(1024, input_shape=(noise_dim,), activation='relu'),
tf.keras.layers.BatchNormalization(axis=-1),
tf.keras.layers.Dense(7*7*128, activation='relu'),
tf.keras.layers.BatchNormalization(axis=-1),
tf.keras.layers.Reshape((7, 7, 128)), # H,W,C = 7, 7, 128
tf.keras.layers.Conv2DTranspose(64, 4, strides=2, padding='same', activation='relu'), # H,W,C = 14, 14, 64
tf.keras.layers.BatchNormalization(axis=-1),
tf.keras.layers.Conv2DTranspose(1, 4, strides=2, padding='same', activation='tanh'), # H,W,C = 28, 28, 1
tf.keras.layers.Reshape((28*28,))
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
])
return model
test_generator(6595521)
###Output
Correct number of parameters in generator.
###Markdown
We have to recreate our network since we've changed our functions. Train and evaluate a DCGANThis is the one part of A3 that significantly benefits from using a GPU. It takes 3 minutes on a GPU for the requested five epochs. Or about 50 minutes on a dual core laptop on CPU (feel free to use 3 epochs if you do it on CPU).
###Code
# Make the discriminator
D = discriminator()
# Make the generator
G = generator()
# Use the function you wrote earlier to get optimizers for the Discriminator and the Generator
D_solver, G_solver = get_solvers()
# Run it!
run_a_gan(D, G, D_solver, G_solver, discriminator_loss, generator_loss, num_epochs=5)
###Output
Epoch: 0 , Iter: 0, D: 1.425597.4, G: 0.653080.4
sel634_aula1.ipynb | ###Markdown
Defining another translation:
###Code
T12 = SE3(2,-1,-2)
T12
###Output
_____no_output_____
###Markdown
We can compose the translation $T1$ with the additional translation $T12$ by multiplying the two homogeneous transformations:
###Code
T2 = T1*T12
T2
###Output
_____no_output_____
###Markdown
The multiplication order applied above, $T1*T12$, means that the transformation $T12$ is defined with respect to the coordinate frame resulting from the transformation $T1$, that is, the *current coordinate frame*. Note that when composing the homogeneous transformations, the resulting translation vector (fourth column of the matrix $T2$) equals the sum of the translation defined in $T1$ and the translation defined in $T12$. This result holds because no rotation was defined in these transformations.
###Code
fig = plt.figure() # create a new figure
SE3().plot(frame='0', dims=[0,4], color='black') # show the coordinate frame for zero rotation
T1.plot(frame='1')
T2.plot(frame='2', color='red')
###Output
_____no_output_____
###Markdown
Representing a Pose More generally, the homogeneous transformation matrix represents a 3D translation and a 3D rotation, which together define the pose of a coordinate frame. In the library, a 3D rigid motion representing the pose of a coordinate frame can be composed from a pure translation followed by a pure rotation:
###Code
T1 = SE3(1, 2, 3)* SE3.Rx(30, 'deg')
T1
###Output
_____no_output_____
###Markdown
The resulting frame can be visualized:
###Code
plt.figure()
SE3().plot(frame='0', dims=[0,4], color='black')
T1.plot(frame='1')
###Output
_____no_output_____
###Markdown
Properties The columns of the rotation matrix embedded in the homogeneous transformation can be accessed:
###Code
T1.o
###Output
_____no_output_____
###Markdown
The inverse of the homogeneous transformation matrix is given by:
###Code
T1.inv()
###Output
_____no_output_____
###Markdown
The rotation matrix can be accessed as a numpy array:
###Code
T1.R
###Output
_____no_output_____
###Markdown
The translation vector can be accessed as a numpy array:
###Code
T1.t
###Output
_____no_output_____
###Markdown
The dimension of the matrix is given by:
###Code
T1.shape
###Output
_____no_output_____
###Markdown
and the order:
###Code
T1.N
###Output
_____no_output_____
###Markdown
which indicates that the matrix operates in 3D space. The properties shared with the other classes can also be checked:
###Code
[T1.isSE, T1.isSO, T1.isrot(), T1.ishom(), T1.isrot2(), T1.ishom2()]
###Output
_____no_output_____
###Markdown
Transforming pointsConsider that the following set of points defines the vertices of a cube:
###Code
P = np.array([[-1, 1, 1, -1, -1, 1, 1, -1], [-1, -1, 1, 1, -1, -1, 1, 1], [-1, -1, -1, -1, 1, 1, 1, 1]])
P
###Output
_____no_output_____
###Markdown
The points $^AP_i$ are defined with respect to a reference coordinate frame fixed to the body $\{A\}$. The homogeneous transformation $^0T_A$ defines the pose of the body frame $\{A\}$ with respect to the inertial frame $\{0\}$ (the world coordinate frame). The coordinates of the points expressed in the world frame, $^0P_i$, are given by:$$ {^0P_i} = {^0T_A} {^AP_i} $$Using the library:
###Code
Q = T1*P
###Output
_____no_output_____
###Markdown
Which can be visualized by:
###Code
def plot_cubo(cubo,grafico):
grafico.scatter(xs=cubo[0], ys=cubo[1], zs=cubo[2], s=20) # draw vertices
# draw lines joining the vertices
lines = [[0,1,5,6], [1,2,6,7], [2,3,7,4], [3,0,4,5]]
for line in lines:
grafico.plot([cubo[0,i] for i in line], [cubo[1,i] for i in line], [cubo[2,i] for i in line])
fig = plt.figure()
SE3().plot(frame='0', dims=[-2,3,0,5,0,5], color='black')
grafico = plt.gca()
plot_cubo(Q,grafico)
###Output
_____no_output_____
###Markdown
Representing 2D CoordinatesIn 2D there is only rotation about an axis perpendicular to the plane. 2D rotations can be represented by orthogonal $2 \times 2$ rotation matrices belonging to the set $SO(2)$. Just as for 3D rotations, 2D rotation matrices have the following properties: each column (and row) is a unit vector, the columns (and rows) are mutually orthogonal, the inverse of the rotation matrix equals its transpose, and the determinant is $+1$. Using the library, a 2D rotation matrix for an angle of $\pi/4$ can be created by:
###Code
R = SO2(pi/4)
R
###Output
_____no_output_____
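###Markdown
For reference, the underlying matrix has the standard planar rotation form $R(\theta) = \begin{bmatrix} \cos\theta & -\sin\theta \\ \sin\theta & \cos\theta \end{bmatrix}$, so for $\theta = \pi/4$ every entry has magnitude $\cos(\pi/4) = \sin(\pi/4) \approx 0.7071$.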
###Markdown
To specify the angle in degrees:
###Code
SO2(45, unit='deg')
###Output
_____no_output_____
###Markdown
The orientation of the coordinate frame represented by the rotation matrix can be drawn:
###Code
plt.figure()
R.plot()
###Output
_____no_output_____
###Markdown
The 2D homogeneous transformation matrix is a $3 \times 3$ matrix belonging to the set $SE(2)$. To define the origin of a coordinate frame, that is, a matrix representing a translation in $SE(2)$, the following command can be used:
###Code
T = SE2(1,2)
T
###Output
_____no_output_____
###Markdown
The $2 \times 2$ rotation matrix is shown in red in the upper-left corner, the translation vector is shown in blue in the rightmost column, and the constant entries of the homogeneous transformation matrix are shown in gray. It is also possible to call the function passing the translation as a list:
###Code
T = SE2([1, 2])
T
###Output
_____no_output_____
###Markdown
To plot the coordinate frame represented by the homogeneous transformation matrix:
###Code
plt.figure()
T.plot()
###Output
_____no_output_____
###Markdown
To define a homogeneous transformation matrix representing a rotation:
###Code
T2 = SE2(45, unit='deg')
T2
plt.figure()
T2.plot()
###Output
_____no_output_____
###Markdown
The rotation can be composed with the translation by multiplying the operations in the following order: translation, then rotation. Composing in the opposite order gives a different result, since the composition of homogeneous transformations is not commutative.
###Code
T3 = T*T2
T3
T4 = T2*T
T4
###Output
_____no_output_____
###Markdown
OperationsFor the 3D case, the only operations that yield a homogeneous transformation matrix are composition (represented by the * operator) and inversion. For example, with ${^0T_1}$ defined by:
###Code
T01 = SE3(1,2,3)*SE3.Rx(-30,'deg')
T01
###Output
_____no_output_____
###Markdown
and ${^1T_2}$ defined as:
###Code
T12 = SE3(0,-2,-1)*SE3.Rz(70,'deg')
T12
###Output
_____no_output_____
###Markdown
Then ${^0T_2} = {^0T_1}{^1T_2}$:
###Code
T02 = T01*T12
T02
###Output
_____no_output_____
###Markdown
Therefore, ${^0T_1} = {^0T_2}({^1T_2})^{-1} = {^0T_2}{^2T_1}$:
###Code
T02*T12.inv()
###Output
_____no_output_____
###Markdown
Note that the addition operation does not yield a homogeneous transformation matrix, but rather a generic matrix (array):
###Code
T01+T02
###Output
_____no_output_____
###Markdown
Other operations, such as multiplication by a scalar or addition of a scalar, likewise do not yield homogeneous transformation matrices:
###Code
2*T01
T01+1
###Output
_____no_output_____
###Markdown
Multiple ObjectsFor some tasks it can be useful to define a set or sequence of rotations or poses. In Python, this can be done using a list:
###Code
T = [ SE3.Rx(0), SE3.Rx(0.1), SE3.Rx(0.2), SE3.Rx(0.3), SE3.Rx(0.4)]
T
type(T)
len(T)
###Output
_____no_output_____
###Markdown
But the library's objects can hold multiple values. Therefore, an `SE3` object can be defined by passing a list of `SE3` objects:
###Code
T = SE3( [ SE3.Rx(0), SE3.Rx(0.1), SE3.Rx(0.2), SE3.Rx(0.3), SE3.Rx(0.4)] )
T
type(T)
len(T)
###Output
_____no_output_____
###Markdown
An individual element of this list can be accessed:
###Code
T[3]
###Output
_____no_output_____
###Markdown
Part of the list can also be accessed. To return elements $2$ and $3$:
###Code
T[2:4]
###Output
_____no_output_____
###Markdown
Or to return elements 0 through 4 in steps of two:
###Code
T[0:5:2]
###Output
_____no_output_____
###Markdown
To refer to the last element, Python uses $-1$, so:
###Code
T[0:-1:2]
###Output
_____no_output_____
###Markdown
To append an element to the end of the list:
###Code
T.append( SE3.Rx(0.5) )
len(T)
###Output
_____no_output_____
###Markdown
To start with an empty list:
###Code
T = SE3.Empty()
len(T)
###Output
_____no_output_____
###Markdown
A similar list, with the rotation angle varying from $0$ to $0.5$ over $5$ values, can also be created with the `np.linspace` command:
###Code
T = SE3.Rx( np.linspace(0, 0.5, 5) )
len(T)
T[3]
###Output
_____no_output_____
###Markdown
A common composition can be applied to every element of the list:
###Code
T2 = SE3.Ry(40, 'deg')
###Output
_____no_output_____
###Markdown
To obtain `A` as the composition `T[i] * T2`:
###Code
A = T * T2
len(A)
###Output
_____no_output_____
###Markdown
To obtain `B` as the composition `T2 * T[i]`:
###Code
B = T2 * T
len(B)
###Output
_____no_output_____
###Markdown
To obtain `C` where each element is `T[i] * T[i]`:
###Code
C = T * T
len(C)
###Output
_____no_output_____
###Markdown
The sequence of transformations can be applied to a vector. For example:
###Code
P = T * [0, 1, 0]
P
###Output
_____no_output_____
###Markdown
SEL0634 - Laboratório de Robôs Manipuladores (Robot Manipulators Laboratory)The laboratory classes use the following libraries:* [Spatial Maths for Python](https://github.com/petercorke/spatialmath-pythonreadme)* [Robotics Toolbox for Python](https://github.com/petercorke/robotics-toolbox-python) Lesson 1 - Spatial Descriptions and TransformationsThe [Spatial Maths for Python](https://github.com/petercorke/spatialmath-pythonreadme) library provides classes to represent position and orientation in 3D and 2D space. We recommend consulting the [library documentation](https://petercorke.github.io/spatialmath-python/) and the [introductory notebooks](https://github.com/petercorke/spatialmath-python/tree/master/notebooks):* [gentle-introduction.ipynb](https://github.com/petercorke/spatialmath-python/blob/master/notebooks/gentle-introduction.ipynb)* [introduction.ipynb](https://github.com/petercorke/spatialmath-python/blob/master/notebooks/introduction.ipynb)Examples taken from the library's documentation and notebooks are shown below. Installing the library The following command installs the library for use in a notebook on [Google Colab](https://colab.research.google.com/) or [Binder](https://mybinder.org/):
###Code
!pip install spatialmath-python
###Output
Collecting spatialmath-python
Downloading https://files.pythonhosted.org/packages/34/60/3d7e1abdab4a3241e987ea3310bf74a5c9f3807a4e862c0824b07acd58c4/spatialmath-python-0.9.6.tar.gz (148kB)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from spatialmath-python) (1.19.5)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from spatialmath-python) (1.4.1)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from spatialmath-python) (3.2.2)
Collecting colored
Downloading https://files.pythonhosted.org/packages/b2/16/04827e24c14266d9161bd86bad50069fea453fa006c3d2b31da39251184a/colored-1.4.2.tar.gz (56kB)
Collecting ansitable
Downloading https://files.pythonhosted.org/packages/91/57/60e1fa72f83071caa22743dab38786527ecd861dd980d84051fc975f1ed8/ansitable-0.9.6.tar.gz
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->spatialmath-python) (2.4.7)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->spatialmath-python) (1.3.1)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->spatialmath-python) (2.8.1)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->spatialmath-python) (0.10.0)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.1->matplotlib->spatialmath-python) (1.15.0)
Building wheels for collected packages: spatialmath-python, colored, ansitable
Building wheel for spatialmath-python (setup.py) ... done
Created wheel for spatialmath-python: filename=spatialmath_python-0.9.6-cp37-none-any.whl size=159519 sha256=22b036546903832927f907c1b3c98503a34121ff36b25daf9acd1e98d5c87d78
Stored in directory: /root/.cache/pip/wheels/a8/9f/51/01ce301eeaa8edbfb34f13534331ad90918a0f4b63931c593c
Building wheel for colored (setup.py) ... done
Created wheel for colored: filename=colored-1.4.2-cp37-none-any.whl size=14003 sha256=134bd13445ef99aec85d99ca5ea6a2e809b4880ffdcecd494f48e472f8a1c0c0
Stored in directory: /root/.cache/pip/wheels/ec/10/5e/d17d5ef644e1051a753bd98f3796789ec39bc3337cd36637f3
Building wheel for ansitable (setup.py) ... done
Created wheel for ansitable: filename=ansitable-0.9.6-cp37-none-any.whl size=12621 sha256=03f94924cda98d6dce88fcaab6cac67618c7861208ff7a5abccd0b82d5e46225
Stored in directory: /root/.cache/pip/wheels/8d/bd/87/460de24b1747521745207f5d43fa56577014937d3792afb8eb
Successfully built spatialmath-python colored ansitable
Installing collected packages: colored, ansitable, spatialmath-python
Successfully installed ansitable-0.9.6 colored-1.4.2 spatialmath-python-0.9.6
###Markdown
The following sequence of commands loads the libraries that will be used:
###Code
import numpy as np
from spatialmath import *
from math import pi
import matplotlib.pyplot as plt
# on Binder, use this line
# %matplotlib notebook
# on Colab, use this line
%matplotlib inline
###Output
_____no_output_____
###Markdown
Representing 3D Coordinates. Rotation matrix. One way to represent orientation in 3D space is with $3\times3$ rotation matrices, which belong to the set $SO(3)$. The library provides functions for defining the basic rotation matrices. For example, the rotation matrix of $\pi/4$ radians about the $x$ axis is defined by:
###Code
R1 = SO3.Rx(pi/4)
###Output
_____no_output_____
###Markdown
Its contents can be displayed:
###Code
R1
###Output
_____no_output_____
###Markdown
To define a matrix using angles in degrees:
###Code
SO3.Rx(45,'deg')
###Output
_____no_output_____
###Markdown
To compose rotation matrices, the Python library uses the * operator:
###Code
R1*R1
###Output
_____no_output_____
###Markdown
It is also possible to use the exponentiation operator to repeat equal rotations in sequence:
###Code
R1**2
###Output
_____no_output_____
###Markdown
A rotation of $\pi/4$ about the X axis followed by another rotation of $\pi/4$ about the X axis results in a rotation of $\pi/2$ about the X axis. Adding the angles is possible because the two consecutive rotations are applied about the same axis. This result can be verified:
###Code
SO3.Rx(pi/2)
###Output
_____no_output_____
###Markdown
A rotation matrix describes the orientation of a coordinate frame. To visualize the coordinate frame described by the rotation matrix:
###Code
fig = plt.figure() # create a new figure
SE3().plot(frame='0', dims=[-1.5,1.5], color='black') # show the coordinate frame for zero rotation
R1.plot(frame='1') # show the coordinate frame with orientation given by R1
###Output
_____no_output_____
###Markdown
Rotation matrices do not commute. Note that $R_x(\pi/2) R_y(\pi/2)$ is different from $R_y(\pi/2) R_x(\pi/2)$:
###Code
R2 = SO3.Rx(pi/2)*SO3.Ry(pi/2)
R3 = SO3.Ry(pi/2)*SO3.Rx(pi/2)
fig = plt.figure() # create a new figure
SE3().plot(frame='0', dims=[-1.5,1.5], color='black') # show the coordinate frame for zero rotation
R2.plot(frame='2', color='red')
R3.plot(frame='3', color='blue')
###Output
_____no_output_____
###Markdown
One way to define a 3D rotation is through the roll, pitch, and yaw angles. To define a rotation with a roll angle of $10^o$, a pitch angle of $20^o$, and a yaw angle of $30^o$, use the command:
###Code
R4 = SO3.RPY([10, 20, 30], unit='deg')
R4
###Output
_____no_output_____
###Markdown
The resulting rotation matrix is equivalent to applying a rotation of $30^o$ about the $Z$ axis (yaw), followed by a rotation of $20^o$ about the $Y$ axis (pitch), followed by a rotation of $10^o$ about the $X$ axis, which is the $ZYX$ convention for roll-pitch-yaw rotation. In this convention, the $ZYX$ sequence of rotations is applied with respect to the *current coordinate frame*: first, the initial coordinate frame is rotated about the $Z$ axis. Next, a rotation is applied about the current $Y$ axis, that is, the $Y$ axis of the frame resulting from the first rotation. Finally, a rotation is applied about the current $X$ axis, which is the $X$ axis of the frame resulting from the previous rotations. Note that each new rotation is **post-multiplied** onto the result already obtained.
###Code
SO3.Rz(30,'deg')*SO3.Ry(20,'deg')*SO3.Rx(10,'deg')
###Output
_____no_output_____
###Markdown
The previous sequence can also be interpreted as the composition of a sequence of rotations in the order $XYZ$ defined with respect to the initial *fixed coordinate frame*. That is, first the rotation about the $X$ axis of the initial frame is applied. The resulting frame is then rotated about the $Y$ axis of the initial frame. Finally, the frame resulting from the two previous rotations is rotated about the $Z$ axis of the initial frame. Note that all rotations were defined about the same frame, which is why this is called composition of rotations with respect to a *fixed coordinate frame*, and each new rotation is **pre-multiplied** onto the result obtained. To visualize the resulting rotation:
###Code
fig = plt.figure() # create a new figure
SE3().plot(frame='0', dims=[-1.5,1.5], color='black') # show the coordinate frame for zero rotation
R4.plot(frame='4') # show the coordinate frame with orientation given by R4
###Output
_____no_output_____
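###Markdown
A quick numerical check (a minimal sketch, assuming the spatialmath `.A` attribute exposes the underlying 3x3 array) confirms that both readings describe the same matrix, since the product $R_z R_y R_x$ is the same either way:
###Code
R_explicit = SO3.Rz(30, 'deg') * SO3.Ry(20, 'deg') * SO3.Rx(10, 'deg')
print(np.allclose(R4.A, R_explicit.A))  # expected: True
###Output
_____no_output_____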
###Markdown
The roll-pitch-yaw angles, in radians, can be obtained from the rotation matrix:
###Code
R4.rpy()
###Output
_____no_output_____
###Markdown
Properties. The Python object representing the rotation matrix has several properties. The columns of the rotation matrix can be accessed individually, considering $\mathbf{R} = [n, o, a]$, where $n$, $o$, and $a$ are three-dimensional vectors.
###Code
R2.n
R2.o
R2.a
###Output
_____no_output_____
###Markdown
The inverse of the rotation matrix, which equals its transpose, can be obtained with:
###Code
R2.inv()
###Output
_____no_output_____
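###Markdown
This property is easy to verify numerically; a minimal sketch (again assuming `.A` exposes the underlying array):
###Code
# for a rotation matrix, the inverse equals the transpose: R^-1 = R^T
print(np.allclose(R2.inv().A, R2.A.T))  # expected: True
###Output
_____no_output_____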
###Markdown
The shape of the matrix can be obtained with:
###Code
R2.shape
###Output
_____no_output_____
###Markdown
The order of the matrix is given by:
###Code
R2.N
###Output
_____no_output_____
###Markdown
which indicates that the matrix operates in 3D space. Some other properties are shared with other classes of the library:
###Code
[R1.isSE, R1.isSO, R1.isrot(), R1.ishom(), R1.isrot2(), R1.ishom2()]
###Output
_____no_output_____
###Markdown
Representing Positions. A rigid motion composed of a rotation and a translation can be described by a homogeneous transformation matrix, a $4\times4$ matrix belonging to the set $SE(3)$. A translation of $1$ along $x$, $2$ along $y$, and $3$ along $z$ can be defined with the following library command:
###Code
T1 = SE3(1,2,3)
T1
###Output
_____no_output_____
###Markdown
where the rotation matrix is shown in red, the translation vector in blue, and the constants of the bottom row in gray. The rotation matrix is the identity because only a translation was defined. The translation vector can also be interpreted as the origin of the frame with respect to an inertial reference frame. To visualize the coordinate frame defined by $T1$:
###Code
fig = plt.figure() # create a new figure
SE3().plot(frame='0', dims=[0,4], color='black') # show the coordinate frame for zero rotation
T1.plot(frame='1')
###Output
_____no_output_____ |
Course II/Python_SQL/control/ПИ19-4 Деменчук Георгий Зачет.ipynb | ###Markdown
Final test. May 28, 2021: ПИ19-3, ПИ19-4 - subgroup 3. June 5, 2021: ПИ19-2, ПИ19-3, ПИ19-4 - subgroup 2. June 4, 2021: ПИ19-4, ПИ19-5 - subgroup 4. Деменчук Георгий (Georgy Demenchuk), ПИ19-4
###Code
#14
"""
ORM
Write a query that outputs the first and last names of students
who received at least one excellent mark (5).
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class City(Base):
__tablename__ = 'city'
city_id = Column(Integer, primary_key=True)
name = Column(Text)
class Subject(Base):
__tablename__ = 'subject'
subj_id = Column(Integer, primary_key=True)
subj_name = Column(String(64), nullable=False, index=True)
hour = Column(Integer, nullable=False, index=True)
semester = Column(Integer, nullable=False)
class University(Base):
__tablename__ = 'university'
univ_id = Column(Integer, primary_key=True)
univ_name = Column(String(255))
rating = Column(Integer)
city = Column(Integer)
class Lecturer(Base):
__tablename__ = 'lecturer'
lecturer_id = Column(Integer, primary_key=True)
surname = Column(String(30), nullable=False)
name = Column(String(30), nullable=False)
city = Column(String(30))
univ_id = Column(ForeignKey('university.univ_id'), nullable=False)
univ = relationship('University')
class Student(Base):
__tablename__ = 'student'
student_id = Column(Integer, primary_key=True)
surname = Column(String(255), nullable=False)
name = Column(String(255), nullable=False)
stipend = Column(Integer, nullable=False)
kurs = Column(Integer, nullable=False)
city = Column(String(300))
birthday = Column(DateTime)
univ_id = Column(ForeignKey('university.univ_id'), nullable=False)
univ = relationship('University')
class ExamMark(Base):
__tablename__ = 'exam_marks'
exam_id = Column(Integer, primary_key=True)
student_id = Column(ForeignKey('student.student_id'))
subj_id = Column(ForeignKey('subject.subj_id'))
mark = Column(Integer)
exam_date = Column(DateTime, nullable=False)
student = relationship('Student')
subj = relationship('Subject')
class SubjLect(Base):
__tablename__ = 'subj_lect'
subj_lect_id = Column(Integer, primary_key=True)
lecturer_id = Column(ForeignKey('lecturer.lecturer_id'))
subj_id = Column(ForeignKey('subject.subj_id'))
lecturer = relationship('Lecturer')
subj = relationship('Subject')
engine = create_engine('sqlite:///Students_2021.sqlite') # 2b
Session = sessionmaker(bind=engine) # 3
session = Session() # 4
q = session.query(Student).join(ExamMark)
q = q.filter(ExamMark.mark==5)
for item in q.all():
print(item.student_id, item.surname, item.name)
#16
"""
Core
Write a query that selects the surnames of ALL students and, for students
who took exams, lists the identifiers of the subjects they took.
"""
from sqlalchemy import (select, create_engine, MetaData, Table, Numeric, String,
insert, update, delete, cast)
from datetime import datetime
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
engine=create_engine('sqlite:///Students_2021.sqlite')
metadata=MetaData()
connection=engine.connect()
city=Table('city', metadata, autoload=True, autoload_with=engine)
subject=Table('subject', metadata, autoload=True, autoload_with=engine)
university=Table('university', metadata, autoload=True, autoload_with=engine)
lecturer=Table('lecturer', metadata, autoload=True, autoload_with=engine)
student=Table('student', metadata, autoload=True, autoload_with=engine)
exam_marks=Table('exam_marks', metadata, autoload=True, autoload_with=engine)
subj_lect=Table('subj_lect', metadata, autoload=True, autoload_with=engine)
joined_tables = student.join(exam_marks, student.c.student_id==exam_marks.c.student_id, isouter=True)
s = select([
student.c.student_id,
student.c.name,
student.c.surname,
exam_marks.c.mark,
exam_marks.c.subj_id,
]
).select_from(joined_tables)
print(str(s))
rp = connection.execute(s)
data_dict = {}
# From the fetched rows, build a dictionary of marks per student
for item in rp.fetchall():
if item.student_id not in data_dict:
data_dict[item.student_id] = {"marks" : [], "name": item.name , "surname": item.surname}
    if item.mark is not None:  # plain 'if' so the first fetched mark of each student is kept too
data_dict[item.student_id]["marks"].append(item.mark)
# Just for prettier output
for k,v in data_dict.items():
if len(v["marks"]) == 0:
        data_dict[k]["marks"] = "no exams taken"
else:
        data_dict[k]["marks"] = "subjects: " + ", ".join(map(str, v["marks"]))
# Print the result
for k, v in data_dict.items():
print(f"{k} {v['surname']} {v['name']}, {v['marks']}")
###Output
SELECT student.student_id, student.name, student.surname, exam_marks.mark, exam_marks.subj_id
FROM student LEFT OUTER JOIN exam_marks ON student.student_id = exam_marks.student_id
1 Иванов Иван, subjects: 4, 3
3 Петров Петр, no exams taken
6 Сидоров Вадим, no exams taken
10 Кузнецов Борис, no exams taken
12 Зайцева Ольга, subjects: 5, 4
32 Котов Павел, subjects: 4, 3, 5, 4
55 Белкин Вадим, subjects: 4, 3, 3, 3
56 Сергеева Елизавета, subjects: 5, 3, 5, 3, 2
62 Семенова Вероника, subjects: 4, 4
63 Медведева Ирина, no exams taken
64 Афанасьева Оксана, no exams taken
71 Синицин Кондрат, subjects: 4
76 Мельникова Дарья, subjects: 3, 3, 5, 3
100 Никитин Самуил, subjects: 4, 3, 4, 3, 5
101 Калашников Евгений, subjects: 3
102 Петров Касьян, no exams taken
103 Шоу Бернар, no exams taken
104 Доу Джо, no exams taken
105 Юнг Карл, no exams taken
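###Markdown
A note on the ORM query in task 14 above: a student with several marks of 5 is returned once per such mark. A minimal deduplicating variant (a sketch reusing the `session` and models already defined above):
###Code
# one row per student, even if they earned several excellent marks
q_unique = session.query(Student).join(ExamMark).filter(ExamMark.mark == 5).distinct()
for item in q_unique.all():
    print(item.student_id, item.surname, item.name)
###Output
_____no_output_____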
|
tests/comparison/statistics.ipynb | ###Markdown
1. Compare Time
###Code
num_event = [10, 20, 40, 80, 160]
num_pick = [500, 1000, 2000, 4000, 8000]
num_sta = 25
time_gamma = [1.10, 3.51, 9.87, 35.14, 149.71]
time_real = [57.48, 94.86, 188.36, 405.86, 769.67]
fig, ax = plt.subplots()
ax.plot(num_event, time_gamma, "o-")
ax.plot(num_event, time_real, "o-")
ax.set_xlabel("Number of events")
ax.set_ylabel("Time (s)")
ax.set_xscale("log")
ax.set_yscale("log")
ax.xaxis.grid(True, which='minor')
ax.set_xticks(num_event, num_event)
ax.tick_params(axis='both', which='both', length=0)
twiny = ax.twiny()
twiny.plot(num_pick, time_gamma, "o-", label="GaMMA")
twiny.plot(num_pick, time_real, "o-", label="REAL")
twiny.set_xlabel("Number of picks")
twiny.set_xscale("log")
twiny.set_yscale("log")
twiny.xaxis.grid(True, which='minor')
twiny.set_xticks(num_pick, num_pick)
twiny.tick_params(axis='both', which='both', length=0)
twiny.legend()
fig.tight_layout()
plt.savefig(figure_path("time_comparison.jpg"), dpi=300, bbox_inches="tight")
plt.savefig(figure_path("time_comparison.pdf"), bbox_inches="tight")
###Output
_____no_output_____
###Markdown
2. Calculate Precision and Recall
###Code
def confusion_matrix(label, pred, threshold=[3.0]):
    # a prediction matches a label only when it falls within the threshold
    # along every compared dimension (e.g. time, x, y)
    index = np.ones((len(pred), len(label)), dtype=bool)
    for i in range(len(threshold)):
        diff = label[np.newaxis, :, i] - pred[:, np.newaxis, i]
        index = np.logical_and(index, np.abs(diff) < threshold[i])
    true_pred = np.sum(np.any(index, axis=1))  # predictions that match at least one label
    recall_label = np.sum(np.any(index, axis=0))  # labels recovered by at least one prediction
    precision = true_pred / len(pred)
    print(true_pred, len(pred))
    recall = recall_label / len(label)
    f1 = 2 * precision * recall / (precision + recall)
    print(f"precision: {precision:.2f}, recall: {recall:.2f}, f1: {f1:.2f}")
    return precision, recall, f1
synthetic_events = pd.read_csv("synthetic_events.csv", sep="\t")
synthetic_events["timestamp"] = synthetic_events["time"].apply(lambda x: datetime.fromisoformat(x).timestamp())
print(len(synthetic_events))
gamma_catalog = pd.read_csv("catalog_gamma.csv", sep="\t")
gamma_catalog["timestamp"] = gamma_catalog["time"].apply(lambda x: datetime.fromisoformat(x).timestamp())
print(len(gamma_catalog))
confusion_matrix(synthetic_events[["timestamp", "x(km)", "y(km)"]].to_numpy(),
gamma_catalog[["timestamp", "x(km)", "y(km)"]].to_numpy(),
threshold=[3.0, 5.0, 5.0])
confusion_matrix(synthetic_events[["timestamp"]].to_numpy(),
gamma_catalog[["timestamp"]].to_numpy(),
threshold=[3.0])
confusion_matrix(synthetic_events[["timestamp", "x(km)", "y(km)"]].to_numpy(),
gamma_catalog[["timestamp", "x(km)", "y(km)"]].to_numpy(),
threshold=[3.0, 15.0, 15.0])
# real_catalog = pd.read_csv("real/catalog_sel.txt", sep="\s+", header=None, names=["num", "year", "mon", "day", "time_raw", "time_seconds", "residual", "lat", "lon", "dep", "mag", "mag_var", "num_p", "num_s", "num_picks", "num_stations", "station_gap"])
real_catalog = pd.read_csv("real/10_4_4_0/hypolocSA.dat", sep="\s+", header=None, names=["year", "mon", "day", "hh", "mm", "ss", "lat", "lon", "dep", "mag", "num_pick", "sta_gap", "residual", "num"])
real_catalog["x(km)"] = (real_catalog["lon"] - config["center"][0]) * config["degree2km"]
real_catalog["y(km)"] = (real_catalog["lat"] - config["center"][1]) * config["degree2km"]
real_catalog["time"] = real_catalog.apply(lambda x: f"{x.year:04.0f}-{x.mon:02.0f}-{x.day:02.0f}T{x.hh:02.0f}:{x.mm:02.0f}:{x.ss:06.3f}", axis=1)
real_catalog["timestamp"] = real_catalog["time"].apply(lambda x: datetime.fromisoformat(x).timestamp())
print(len(real_catalog))
# confusion_matrix(synthetic_events[["timestamp"]].to_numpy(), real_catalog[["timestamp"]].to_numpy(), threshold=[3.0])
confusion_matrix(synthetic_events[["timestamp"]].to_numpy(), real_catalog[["timestamp"]].to_numpy(), threshold=[3.0])
confusion_matrix(synthetic_events[["timestamp", "x(km)", "y(km)"]].to_numpy(),
real_catalog[["timestamp", "x(km)", "y(km)"]].to_numpy(),
threshold=[3.0, 15.0, 15.0])
###Output
50 50
precision: 1.00, recall: 1.00, f1: 1.00
46 50
precision: 0.92, recall: 0.92, f1: 0.92
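###Markdown
For intuition: precision = matched predictions / all predictions, recall = recovered labels / all labels, and F1 = 2PR/(P+R) is their harmonic mean. A tiny worked example with made-up 1-D event times (values chosen purely for illustration):
###Code
toy_label = np.array([[0.0], [10.0], [20.0]])  # three true events
toy_pred = np.array([[0.5], [9.0], [55.0]])    # two good detections, one false
# 2 of 3 predictions match a label and 2 of 3 labels are recovered,
# so precision = recall = f1 = 0.67
confusion_matrix(toy_label, toy_pred, threshold=[3.0])
###Output
_____no_output_____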
###Markdown
3. Time Error and Amplitude Error
###Code
# time_err = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]) * 100
# precision_gamma = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00]
# recall_gamma = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00]
# f1_gamma = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00]
# sigma11_gamma = [0.00, 0.11, 0.22, 0.33, 0.44, 0.53]
# sigma22_gamma = [0.00, 0.00, 0.01, 0.01, 0.01, 0.02]
# amp_err = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]) * 100
# precision_gamma = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00]
# recall_gamma = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00]
# f1_gamma = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00]
# sigma11_gamma = [0.00, 0.00, 0.01, 0.01, 0.01, 0.01]
# sigma22_gamma = [0.00, 0.20, 0.39, 0.52, 0.69, 0.76]
## Time and Amplitude Error together
time_err = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
amp_err = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
err = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
precision_gamma = [1.00, 1.00, 1.00, 1.00, 0.98, 0.93]
recall_gamma = [1.00, 1.00, 1.00, 1.00, 1.00, 0.96]
f1_gamma = [1.00, 1.00, 1.00, 1.00, 0.99, 0.94]
sigma11_gamma = [0.00, 0.19, 0.38, 0.56, 0.74, 0.88]
sigma22_gamma = [0.00, 0.20, 0.40, 0.60, 0.76, 0.83]
fig, ax = plt.subplots(2, 1, sharex=False, figsize=(6, 6))
ax[0].plot(amp_err, precision_gamma, "o-", label="Precision")
ax[0].plot(amp_err, recall_gamma, "o-", label="Recall")
ax[0].plot(amp_err, f1_gamma, "o-", label="F1")
ax[0].set_xlabel(r"Error of Picks")
ax[0].set_ylabel("Score")
ax[0].set_xticks(amp_err, amp_err)
ax[0].legend()
ax[1].plot(amp_err, sigma11_gamma, "o-", label=r"$\sigma_{11}$")
ax[1].plot(amp_err, sigma22_gamma, "o-", label=r"$\sigma_{22}$")
ax[1].set_xlabel(r"Error of Picks")
ax[1].set_ylabel("Standard Deviation")
ax[1].set_xticks(amp_err, amp_err)
ax[1].legend()
fig.tight_layout()
plt.savefig(figure_path("pick_error.jpg"), dpi=300, bbox_inches="tight")
plt.savefig(figure_path("pick_error.pdf"), bbox_inches="tight")
###Output
_____no_output_____
###Markdown
4. False Positive Rate
###Code
## using a time threshold of 3.0, using a space threshold of 5.0,
# false_positive_rate = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]) * 100
# precision_gamma = [1.00, 1.00, 0.96, 1.00, 0.94, 0.96]
# recall_gamma = [1.00, 1.00, 0.98, 1.00, 0.98, 0.98]
# f1_gamma = [1.00, 1.00, 0.97, 1.00, 0.96, 0.97]
# sigma11_gamma = [0.00, 0.05, 0.08, 0.07, 0.11, 0.11]
# sigma22_gamma = [0.00, 0.05, 0.05, 0.04, 0.07, 0.07]
false_positive_rate = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
false_positive_rate = false_positive_rate / (false_positive_rate + 1) * 100
precision_gamma = [1.00, 1.00, 1.00, 0.98, 0.92, 0.74]
recall_gamma = [1.00, 1.00, 1.00, 1.00, 0.96, 0.96]
f1_gamma = [1.00, 1.00, 1.00, 0.99, 0.94, 0.84]
sigma11_gamma = [0.38, 0.39, 0.40, 0.42, 0.44, 0.50]
sigma22_gamma = [0.40, 0.38, 0.39, 0.40, 0.40, 0.43]
fig, ax = plt.subplots(2, 1, sharex=False, figsize=(6, 6))
ax[0].plot(false_positive_rate, precision_gamma, "o-", label="Precision")
ax[0].plot(false_positive_rate, recall_gamma, "o-", label="Recall")
ax[0].plot(false_positive_rate, f1_gamma, "o-", label="F1")
ax[0].set_xlabel("False Positive Rate (100%)")
ax[0].set_ylabel("Score")
ax[0].set_xticks(false_positive_rate, [f"{x:.1f}" for x in false_positive_rate])
ax[0].legend()
ax[1].plot(false_positive_rate, sigma11_gamma, "o-", label=r"$\sigma_{11}$")
ax[1].plot(false_positive_rate, sigma22_gamma, "o-", label=r"$\sigma_{22}$")
ax[1].set_xlabel("False Positive Rate (100%)")
ax[1].set_ylabel("Standard Deviation")
ax[1].set_xticks(false_positive_rate, [f"{x:.1f}" for x in false_positive_rate])
ax[1].legend()
fig.tight_layout()
plt.savefig(figure_path("false_positive.jpg"), dpi=300, bbox_inches="tight")
plt.savefig(figure_path("false_positive.pdf"), bbox_inches="tight")
###Output
_____no_output_____
###Markdown
5. Velocity Model
###Code
## using a threshold of 3.0
# err_vel = np.array([-0.0, -0.1, -0.2, -0.3, -0.4]) * 100
# precision_gamma = [1.00, 1.00, 1.00, 0.79, 0.51]
# recall_gamma = [1.00, 1.00, 1.00, 0.82, 0.46]
# f1_gamma = [1.00, 1.00, 1.00, 0.81, 0.48]
# sigma11_gamma = [0.00, 0.05, 0.12, 0.23, 0.30]
# sigma22_gamma = [0.00, 0.02, 0.04, 0.05, 0.14]
## using a time threshold of 3.0 s, spatical threshold of 15.0 km
err_vel = np.array([-0.2, -0.1, 0.0, 0.1, 0.2, 0.3]) * 100
# err_vel = (1/(err_vel + 1) - 1) * 100
precision_gamma = [0.65, 0.92, 1.00, 1.00, 0.96, 0.72]
recall_gamma = [0.66, 0.94, 1.00, 1.00, 0.96, 0.74]
f1_gamma = [0.66, 0.93, 1.00, 1.00, 0.96, 0.74]
sigma11_gamma = [0.69, 0.52, 0.40, 0.50, 0.60, 0.64]
sigma22_gamma = [0.40, 0.39, 0.39, 0.39, 0.39, 0.39]
fig, ax = plt.subplots(2, 1, sharex=False, figsize=(6, 6))
ax[0].plot(err_vel, precision_gamma, "o-", label="Precision")
ax[0].plot(err_vel, recall_gamma, "o-", label="Recall")
ax[0].plot(err_vel, f1_gamma, "o-", label="F1")
ax[0].set_xlabel("Velocity Error (100%)")
ax[0].set_ylabel("Score")
ax[0].set_xticks(err_vel, err_vel)
ax[0].legend()
ax[1].plot(err_vel, sigma11_gamma, "o-", label=r"$\sigma_{11}$")
ax[1].plot(err_vel, sigma22_gamma, "o-", label=r"$\sigma_{22}$")
ax[1].set_xlabel("Velocity Error (100%)")
ax[1].set_ylabel("Standard Deviation")
ax[1].set_xticks(err_vel, err_vel)
ax[1].legend()
fig.tight_layout()
plt.savefig(figure_path("velocity_error.jpg"), dpi=300, bbox_inches="tight")
plt.savefig(figure_path("velocity_error.pdf"), bbox_inches="tight")
###Output
_____no_output_____
###Markdown
6. Oversampling Rate
###Code
##### vp *= 0.9
##### vs *= 0.9
##### phase_amp_err = 0.4
##### phase_time_err = 0.4
##### phase_fp_rate = 0.4
# sampling_rate = np.array([0.1, 0.5, 1., 2., 3., 4., 5., 8.0])
# # err_vel = (1/(err_vel + 1) - 1) * 100
# precision_gamma = [0.98, 1.00, 0.92, 0.92, 0.96, 0.94, 0.94]
# recall_gamma = [0.80, 0.98, 0.94, 0.94, 0.98, 0.96, 0.96]
# f1_gamma = [0.88, 0.99, 0.93, 0.93, 0.97, 0.95, 0.95]
# sigma11_gamma = [0.57, 0.53, 0.52, 0.52, 0.52, 0.52, 0.52]
# sigma22_gamma = [0.43, 0.39, 0.39, 0.39, 0.39, 0.39, 0.39]
sampling_rate = np.array([0.5, 1., 2., 4., 8.0])
# err_vel = (1/(err_vel + 1) - 1) * 100
precision_gamma = [0.98, 1.00, 0.92, 0.96, 0.94]
recall_gamma = [0.80, 0.98, 0.94, 0.98, 0.96]
f1_gamma = [0.88, 0.99, 0.93, 0.97, 0.95]
sigma11_gamma = [0.57, 0.53, 0.52, 0.52, 0.52]
sigma22_gamma = [0.43, 0.39, 0.39, 0.39, 0.39]
fig, ax = plt.subplots(2, 1, sharex=False, figsize=(6, 6))
ax[0].plot(sampling_rate, precision_gamma, "o-", label="Precision")
ax[0].plot(sampling_rate, recall_gamma, "o-", label="Recall")
ax[0].plot(sampling_rate, f1_gamma, "o-", label="F1")
ax[0].set_xlabel("Oversampling ratio")
ax[0].set_ylabel("Score")
ax[0].set_xscale("log")
ax[0].set_xticks(sampling_rate, sampling_rate)
ax[0].legend()
ax[1].plot(sampling_rate, sigma11_gamma, "o-", label=r"$\sigma_{11}$")
ax[1].plot(sampling_rate, sigma22_gamma, "o-", label=r"$\sigma_{22}$")
ax[1].set_xlabel("Oversampling ratio")
ax[1].set_ylabel("Standard Deviation")
ax[1].set_xscale("log")
ax[1].set_xticks(sampling_rate, sampling_rate)
ax[1].legend()
fig.tight_layout()
plt.savefig(figure_path("oversampling.jpg"), dpi=300, bbox_inches="tight")
plt.savefig(figure_path("oversampling.pdf"), bbox_inches="tight")
##
real_4 = [0.94, 0.90, 0.92]
###Output
_____no_output_____ |
jupyter/docker_enterprise/docker_notebooks/Spark_NLP/Healthcare/3.1.Calculate_Medicare_Risk_Adjustment_Score.ipynb | ###Markdown
Medicare Risk Adjustment: In the United States, the Centers for Medicare & Medicaid Services sets reimbursement for private Medicare plan sponsors based on the assessed risk of their beneficiaries. Information found in unstructured medical records may be more indicative of member risk than existing structured data, creating more accurate risk pools.
###Code
import os
jsl_secret = os.getenv('SECRET')
import sparknlp
sparknlp_version = sparknlp.version()
import sparknlp_jsl
jsl_version = sparknlp_jsl.version()
print (jsl_secret)
import os
import json
import string
import numpy as np
import pandas as pd
import sparknlp
import sparknlp_jsl
from sparknlp.base import *
from sparknlp.util import *
from sparknlp.annotator import *
from sparknlp_jsl.base import *
from sparknlp_jsl.annotator import *
from pyspark.sql import functions as F
from pyspark.ml import Pipeline, PipelineModel
from sparknlp.training import CoNLL
params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}
spark = sparknlp_jsl.start(jsl_secret,params=params)
print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())
spark
###Output
_____no_output_____
###Markdown
Downloading oncology notes. In this notebook we will use clinical notes extracted from www.mtsamples.com.
###Code
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/mt_oncology_10.zip
!unzip -q mt_oncology_10.zip
df = spark.sparkContext.wholeTextFiles('mt_oncology_10/mt_note_*.txt').toDF().withColumnRenamed('_1','path').withColumnRenamed('_2','text')
df.show(truncate=50)
sample_text = df.limit(2).select("text").collect()[1][0]
print(sample_text)
###Output
Medical Specialty:Hematology - Oncology
Sample Name: Mullerian Adenosarcoma
Description: Discharge summary of a patient presenting with a large mass aborted through the cervix.
(Medical Transcription Sample Report)
PRINCIPAL DIAGNOSIS: Mullerian adenosarcoma.
HISTORY OF PRESENT ILLNESS: The patient is a 56-year-old presenting with a large mass aborted through the cervix.
PHYSICAL EXAM:CHEST: Clear. There is no heart murmur. ABDOMEN: Nontender.
PELVIC: There is a large mass in the vagina.
HOSPITAL COURSE: The patient went to surgery on the day of admission. The postoperative course was marked by fever and ileus. The patient regained bowel function. She was discharged on the morning of the seventh postoperative day.
OPERATIONS: July 25, 2006: Total abdominal hysterectomy, bilateral salpingo-oophorectomy.
DISCHARGE CONDITION: Stable.
PLAN: The patient will remain at rest initially with progressive ambulation thereafter. She will avoid lifting, driving, stairs, or intercourse. She will call me for fevers, drainage, bleeding, or pain. Family history, social history, and psychosocial needs per the social worker. The patient will follow up in my office in one week.
PATHOLOGY: Mullerian adenosarcoma.
MEDICATIONS: Percocet 5, #40, one q.3 h. p.r.n. pain.
###Markdown
ICD-10 code extraction. Now we will create a pipeline to extract ICD-10 codes. This pipeline will find diseases and problems and then map them to their ICD-10 codes. We will also check whether each problem is still present (assertion status).
###Code
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
sentenceDetector = SentenceDetectorDLModel.pretrained()\
.setInputCols(["document"])\
.setOutputCol("sentence")
tokenizer = Tokenizer()\
.setInputCols(["sentence"])\
    .setOutputCol("token")
word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\
.setInputCols(["sentence", "token"])\
.setOutputCol("embeddings")
c2doc = Chunk2Doc()\
.setInputCols("ner_chunk")\
.setOutputCol("ner_chunk_doc")
clinical_ner = MedicalNerModel.pretrained("ner_jsl", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("ner")
ner_converter = NerConverter() \
.setInputCols(["sentence", "token", "ner"]) \
.setOutputCol("ner_chunk")\
.setWhiteList(["Oncological", "Disease_Syndrome_Disorder", "Heart_Disease"])
sbert_embedder = BertSentenceEmbeddings\
.pretrained("sbiobert_base_cased_mli",'en','clinical/models')\
.setInputCols(["ner_chunk_doc"])\
.setOutputCol("sbert_embeddings")
icd10_resolver = SentenceEntityResolverModel.pretrained("sbiobertresolve_icd10cm_augmented_billable_hcc","en", "clinical/models")\
.setInputCols(["ner_chunk", "sbert_embeddings"])\
.setOutputCol("icd10cm_code")\
.setDistanceFunction("EUCLIDEAN")\
.setReturnCosineDistances(True)
clinical_assertion = AssertionDLModel.pretrained("jsl_assertion_wip", "en", "clinical/models") \
.setInputCols(["sentence", "ner_chunk", "embeddings"]) \
.setOutputCol("assertion")
resolver_pipeline = Pipeline(
stages = [
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
clinical_ner,
ner_converter,
c2doc,
sbert_embedder,
icd10_resolver,
clinical_assertion
])
data_ner = spark.createDataFrame([[""]]).toDF("text")
icd_model = resolver_pipeline.fit(data_ner)
###Output
sentence_detector_dl download started this may take some time.
Approximate size to download 354.6 KB
[OK!]
embeddings_clinical download started this may take some time.
Approximate size to download 1.6 GB
[OK!]
ner_jsl download started this may take some time.
Approximate size to download 14.5 MB
[OK!]
sbiobert_base_cased_mli download started this may take some time.
Approximate size to download 384.3 MB
[OK!]
sbiobertresolve_icd10cm_augmented_billable_hcc download started this may take some time.
Approximate size to download 1.4 GB
[OK!]
jsl_assertion_wip download started this may take some time.
Approximate size to download 1.4 MB
[OK!]
###Markdown
We can now transform the data. The path column holds a long path, so we will use a filename column instead. Each file name refers to a different patient.
###Code
path_array = F.split(df['path'], '/')
df = df.withColumn('filename', path_array.getItem(F.size(path_array)- 1)).select(['filename', 'text'])
icd10_sdf = icd_model.transform(df)
###Output
_____no_output_____
###Markdown
Let's see how our model extracted ICD Codes on a sample.
###Code
light_model = LightPipeline(icd_model)
light_result = light_model.fullAnnotate(sample_text)
from sparknlp_display import EntityResolverVisualizer
vis = EntityResolverVisualizer()
# Change color of an entity label
vis.set_label_colors({'PROBLEM':'#008080'})
vis.display(light_result[0], 'ner_chunk', 'icd10cm_code')
###Output
_____no_output_____
###Markdown
The ICD resolver can also tell us the HCC status. The HCC status is 1 if the Medicare Risk Adjustment model includes the ICD code.
###Code
icd10_hcc_df = icd10_sdf.select("filename", F.explode(F.arrays_zip('ner_chunk.result',
'icd10cm_code.result',
'icd10cm_code.metadata',
"assertion.result"
)).alias("cols")) \
.select("filename", F.expr("cols['0']").alias("chunk"),
F.expr("cols['1']").alias("icd10_code"),
F.expr("cols['2']['all_k_aux_labels']").alias("hcc_list"),
F.expr("cols['3']").alias("assertion")
).toPandas()
icd10_hcc_df.head()
icd10_hcc_df["hcc_status"] = icd10_hcc_df["hcc_list"].apply(lambda x: x.split("||")[1])
icd10_df = icd10_hcc_df.drop("hcc_list", axis = 1)
icd10_df.head()
icd10_df = icd10_df[icd10_df.hcc_status=="1"]
icd10_df = icd10_df[~icd10_df.assertion.isin(["Family", "Past"])][['filename','chunk','icd10_code']].drop_duplicates()
###Output
_____no_output_____
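###Markdown
The parsing above assumes the `all_k_aux_labels` metadata is a `||`-separated string whose second field is the HCC status. A tiny sketch of that indexing with a hypothetical value (not taken from the real model output):
###Code
example_aux_label = "1||1||19"  # hypothetical: billable flag || HCC status || HCC score
print(example_aux_label.split("||")[1])
###Output
_____no_output_____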
###Markdown
We filtered the ICD codes based on HCC status. Now we will create an ICD code list column per file.
###Code
icd10_df['Extracted_Entities_vs_ICD_Codes'] = list(zip(icd10_df.chunk, icd10_df.icd10_code))
icd10_df.head(10)
icd10_codes= icd10_df.groupby("filename").icd10_code.apply(lambda x: list(x)).reset_index()
icd10_vs_entities = icd10_df.groupby("filename").Extracted_Entities_vs_ICD_Codes.apply(lambda x: list(x)).reset_index()
icd10_df_all = icd10_codes.merge(icd10_vs_entities)
icd10_df_all
###Output
_____no_output_____
###Markdown
Gender Classification. In Spark NLP, we have a pretrained model to detect the gender of the patient. Let's use it via `ClassifierDLModel`.
###Code
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
tokenizer = Tokenizer()\
.setInputCols(["document"])\
    .setOutputCol("token")
biobert_embeddings = BertEmbeddings().pretrained('biobert_pubmed_base_cased') \
.setInputCols(["document",'token'])\
.setOutputCol("bert_embeddings")
sentence_embeddings = SentenceEmbeddings() \
.setInputCols(["document", "bert_embeddings"]) \
.setOutputCol("sentence_bert_embeddings") \
.setPoolingStrategy("AVERAGE")
genderClassifier = ClassifierDLModel.pretrained('classifierdl_gender_biobert', 'en', 'clinical/models') \
.setInputCols(["document", "sentence_bert_embeddings"]) \
.setOutputCol("gender")
gender_pipeline = Pipeline(stages=[documentAssembler,
#sentenceDetector,
tokenizer,
biobert_embeddings,
sentence_embeddings,
genderClassifier])
data_ner = spark.createDataFrame([[""]]).toDF("text")
gender_model = gender_pipeline.fit(data_ner)
gender_df = gender_model.transform(df)
gender_pd_df = gender_df.select("filename", F.explode(F.arrays_zip('gender.result', 'gender.metadata')).alias("cols")) \
.select("filename",
F.expr("cols['0']").alias("Gender"),
F.expr("cols['1']['Female']").alias("Female"),
F.expr("cols['1']['Male']").alias("Male")).toPandas()
gender_pd_df['Gender'] = gender_pd_df.apply(lambda x : "F" if float(x['Female']) >= float(x['Male']) else "M", axis=1)
gender_pd_df = gender_pd_df[['filename', 'Gender']]
###Output
_____no_output_____
###Markdown
All patients' genders are now ready in a dataframe.
###Code
gender_pd_df
###Output
_____no_output_____
###Markdown
Age. We can get the patient's age from the notes with another pipeline. We create an age pipeline to extract entities labelled AGE. A note may contain more than one age entity; we take the first one as the patient's age.
###Code
date_ner_converter = NerConverter() \
.setInputCols(["sentence", "token", "ner"]) \
.setOutputCol("ner_chunk")\
.setWhiteList(["Age"])
age_pipeline = Pipeline(
stages = [
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
clinical_ner,
date_ner_converter
])
data_ner = spark.createDataFrame([[""]]).toDF("text")
age_model = age_pipeline.fit(data_ner)
light_model = LightPipeline(age_model)
light_result = light_model.fullAnnotate(sample_text)
from sparknlp_display import NerVisualizer
visualiser = NerVisualizer()
ner_vis = visualiser.display(light_result[0], label_col='ner_chunk', document_col='document')
age_result = age_model.transform(df)
age_df = age_result.select("filename",F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata')).alias("cols")) \
.select("filename",
F.expr("cols['0']").alias("Age"),
F.expr("cols['1']['entity']").alias("ner_label")).toPandas().groupby('filename').first().reset_index()
age_df.Age = age_df.Age.replace(r"\D", "", regex = True).astype(int)
age_df.drop('ner_label', axis=1, inplace=True)
age_df.head()
###Output
_____no_output_____
###Markdown
Calculating the Medicare Risk Adjustment Score. Now that we have all the data that can be extracted from the clinical notes, we can calculate the Medicare Risk Adjustment score.
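One caveat before running the next cell: the `profile` function used below is not defined in this excerpt; in the original notebook it is a Spark UDF that maps the list of ICD-10 codes plus demographics to a JSON string holding the CMS-HCC risk profile. A minimal hypothetical stand-in (the body and payload here are assumptions, not the actual implementation) could be registered like this:
###Code
import json
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
def hcc_profile(codes, age, gender):
    # placeholder logic: a real implementation would map each ICD-10 code to
    # its CMS-HCC category and sum the corresponding risk coefficients
    return json.dumps({"risk_score": 0.0,
                       "hcc_lst": "[]",
                       "parameters": json.dumps({"age": age, "sex": gender}),
                       "details": "{}"})
profile = udf(hcc_profile, StringType())
###Output
_____no_output_____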
###Code
patient_df = age_df.merge(icd10_df_all, on='filename', how = "left")\
.merge(gender_pd_df, on='filename', how = "left")
patient_df = patient_df.dropna()
patient_df.info()
df = spark.createDataFrame(patient_df)
df.show(truncate=False)
from pyspark.sql.types import MapType, IntegerType, DoubleType, StringType, StructType, StructField, FloatType
import pyspark.sql.functions as f
schema = StructType([
StructField('risk_score', FloatType()),
StructField('hcc_lst', StringType()),
StructField('parameters', StringType()),
StructField('details', StringType())])
df = df.withColumn("hcc_profile", profile(df.icd10_code, df.Age, df.Gender))
df = df.withColumn("hcc_profile", F.from_json(F.col("hcc_profile"), schema))
df = df.withColumn("risk_score", df.hcc_profile.getItem("risk_score"))\
       .withColumn("hcc_lst", df.hcc_profile.getItem("hcc_lst"))\
       .withColumn("parameters", df.hcc_profile.getItem("parameters"))\
       .withColumn("details", df.hcc_profile.getItem("details"))
df.select('risk_score', 'icd10_code', 'Age', 'Gender').show(truncate=False)
df.show(truncate=100, vertical=True)
###Output
+----------+---------------------------------------+---+------+
|risk_score|icd10_code |Age|Gender|
+----------+---------------------------------------+---+------+
|0.15 |[C801] |59 |F |
|1.419 |[C499, C499, D6181, M069] |66 |F |
|3.265 |[C5092, C5091, C5092, C800, G20, C5092]|57 |F |
|0.309 |[F319] |63 |F |
|2.982 |[C459, C800] |66 |F |
|1.372 |[D5702, K5505] |19 |M |
|0.15 |[C6960, C6960] |16 |M |
+----------+---------------------------------------+---+------+
-RECORD 0-------------------------------------------------------------------------------------------------------------------------------
filename | mt_note_01.txt
Age | 59
icd10_code | [C801]
Extracted_Entities_vs_ICD_Codes | [{cancer, C801}]
Gender | F
hcc_profile | {0.15, ["D1","HCC12"], {"elig":"CNA","age":59,"sex":"F","origds":false,"disabled":false,"medicaid...
risk_score | 0.15
hcc_lst | ["D1","HCC12"]
parameters | {"elig":"CNA","age":59,"sex":"F","origds":false,"disabled":false,"medicaid":false}
details | {"CNA_D1":0.0,"CNA_HCC12":0.15}
-RECORD 1-------------------------------------------------------------------------------------------------------------------------------
filename | mt_note_03.txt
Age | 66
icd10_code | [C499, C499, D6181, M069]
Extracted_Entities_vs_ICD_Codes | [{leiomyosarcoma, C499}, {Leiomyosarcoma, C499}, {Pancytopenia, D6181}, {rheumatoid arthritis, M0...
Gender | F
hcc_profile | {1.419, ["D2","HCC10","HCC40"], {"elig":"CNA","age":66,"sex":"F","origds":false,"disabled":false,...
risk_score | 1.419
hcc_lst | ["D2","HCC10","HCC40"]
parameters | {"elig":"CNA","age":66,"sex":"F","origds":false,"disabled":false,"medicaid":false}
details | {"CNA_F65_69":0.323,"CNA_D2":0.0,"CNA_HCC10":0.675,"CNA_HCC40":0.421}
-RECORD 2-------------------------------------------------------------------------------------------------------------------------------
filename | mt_note_05.txt
Age | 57
icd10_code | [C5092, C5091, C5092, C800, G20, C5092]
Extracted_Entities_vs_ICD_Codes | [{Breast Cancer, C5092}, {ductal carcinoma of the left breast, C5091}, {breast cancer, C5092}, {m...
Gender | F
hcc_profile | {3.265, ["HCC78","D2","HCC8"], {"elig":"CNA","age":57,"sex":"F","origds":false,"disabled":false,"...
risk_score | 3.265
hcc_lst | ["HCC78","D2","HCC8"]
parameters | {"elig":"CNA","age":57,"sex":"F","origds":false,"disabled":false,"medicaid":false}
details | {"CNA_HCC78":0.606,"CNA_D2":0.0,"CNA_HCC8":2.659}
-RECORD 3-------------------------------------------------------------------------------------------------------------------------------
filename | mt_note_06.txt
Age | 63
icd10_code | [F319]
Extracted_Entities_vs_ICD_Codes | [{Type 1 bipolar disease, F319}]
Gender | F
hcc_profile | {0.309, ["D1","HCC59"], {"elig":"CNA","age":63,"sex":"F","origds":false,"disabled":false,"medicai...
risk_score | 0.309
hcc_lst | ["D1","HCC59"]
parameters | {"elig":"CNA","age":63,"sex":"F","origds":false,"disabled":false,"medicaid":false}
details | {"CNA_D1":0.0,"CNA_HCC59":0.309}
-RECORD 4-------------------------------------------------------------------------------------------------------------------------------
filename | mt_note_08.txt
Age | 66
icd10_code | [C459, C800]
Extracted_Entities_vs_ICD_Codes | [{malignant mesothelioma, C459}, {metastatic disease, C800}]
Gender | F
hcc_profile | {2.982, ["HCC8","D1"], {"elig":"CNA","age":66,"sex":"F","origds":false,"disabled":false,"medicaid...
risk_score | 2.982
hcc_lst | ["HCC8","D1"]
parameters | {"elig":"CNA","age":66,"sex":"F","origds":false,"disabled":false,"medicaid":false}
details | {"CNA_F65_69":0.323,"CNA_HCC8":2.659,"CNA_D1":0.0}
-RECORD 5-------------------------------------------------------------------------------------------------------------------------------
filename | mt_note_09.txt
Age | 19
icd10_code | [D5702, K5505]
Extracted_Entities_vs_ICD_Codes | [{Sickle cell crisis, D5702}, {Veno-occlusive crisis, K5505}]
Gender | M
hcc_profile | {1.372, ["D1","HCC46"], {"elig":"CNA","age":19,"sex":"M","origds":false,"disabled":false,"medicai...
risk_score | 1.372
hcc_lst | ["D1","HCC46"]
parameters | {"elig":"CNA","age":19,"sex":"M","origds":false,"disabled":false,"medicaid":false}
details | {"CNA_D1":0.0,"CNA_HCC46":1.372}
-RECORD 6-------------------------------------------------------------------------------------------------------------------------------
filename | mt_note_10.txt
Age | 16
icd10_code | [C6960, C6960]
Extracted_Entities_vs_ICD_Codes | [{Rhabdomyosarcoma of the left orbit, C6960}, {rhabdomyosarcoma of the left orbit, C6960}]
Gender | M
hcc_profile | {0.15, ["D1","HCC12"], {"elig":"CNA","age":16,"sex":"M","origds":false,"disabled":false,"medicaid...
risk_score | 0.15
hcc_lst | ["D1","HCC12"]
parameters | {"elig":"CNA","age":16,"sex":"M","origds":false,"disabled":false,"medicaid":false}
details | {"CNA_D1":0.0,"CNA_HCC12":0.15}
|
openmdao/docs/openmdao_book/features/core_features/working_with_groups/set_order.ipynb | ###Markdown
Setting the Order of Subsystems in a Group
By default, subsystems are executed in the same order in which they were added to their parent Group. In order to change this order, use the `set_order` method.
```{eval-rst}
.. automethod:: openmdao.core.group.Group.set_order
    :noindex:
```
The list of names provided to `set_order` must contain every subsystem that has been added to the Group.
```{Note}
Use caution when setting the order of execution of your subsystems, whether by just calling `add_subsystem` in a specific order, or by later changing the order using `set_order`. If you choose an order that doesn't follow the natural data flow order of your subsystems, your model may take longer to converge.
```
Usage
Change the execution order of components *C1* and *C2*.
###Code
import openmdao.api as om
class ReportOrderComp(om.ExplicitComponent):
"""Adds name to list."""
def __init__(self, order_list):
super().__init__()
self._order_list = order_list
def compute(self, inputs, outputs):
self._order_list.append(self.pathname)
# this list will record the execution order of our C1, C2, and C3 components
order_list = []
prob = om.Problem()
model = prob.model
model.add_subsystem('C1', ReportOrderComp(order_list))
model.add_subsystem('C2', ReportOrderComp(order_list))
model.add_subsystem('C3', ReportOrderComp(order_list))
prob.setup()
prob.run_model()
print(order_list)
assert(order_list == ['C1', 'C2', 'C3'])
# reset the shared order list
order_list[:] = []
prob.setup()
# now swap C2 and C1 in the order
model.set_order(['C2', 'C1', 'C3'])
# after changing the order, we must call setup again
prob.setup()
prob.run_model()
print(order_list)
assert(order_list == ['C2', 'C1', 'C3'])
###Output
_____no_output_____ |
session-two/session_two_filled_template.ipynb | ###Markdown
Beginner's Python - Session Two Template. User Input. Introduction. Use the input function to ask the user a question and print the response:
###Code
colour = input("What's your favourite colour?")
print("No way! My favourite colour is", colour, "too!")
###Output
What's your favourite colour?Blue
No way! My favourite colour is Blue too!
###Markdown
Ask for numeric user input and assign it to a variable. Check this variable's type and then convert it to an integer. Check the type again, perform some maths on the variable, and print the result:
###Code
apples = input("How many apples do you need to make a pie?")
print(type(apples))
apples = int(apples)
print(type(apples))
pie_size = apples * 5
print(pie_size, "cm diameter")
###Output
How many apples do you need to make a pie?5
<class 'str'>
<class 'int'>
25 cm diameter
###Markdown
Standard Puzzles Remake the farm yard animal puzzle from session one using user input
###Code
num_chickens = int(input("How many chickens are on the farm? "))
num_cows = int(input("How many cows are on the farm? "))
num_pigs = int(input("How many pigs are on the farm? "))
print("There are",
2 + num_chickens * 2 + (num_cows + num_pigs) * 4,
"legs on the farm")
###Output
There are 32 legs on the farm
###Markdown
Bonus Puzzles Try adding `\r\n` to the end of your question in `input()`
###Code
response = input("Are you going to answer on a new line?\r\n")
###Output
_____no_output_____
###Markdown
Variable Types. Introduction. Add two strings together to concatenate their contents:
###Code
"Hello" + "World"
###Output
_____no_output_____
###Markdown
Remember, we don't need to use `print()` here since we are only outputting one thing. You're welcome to add it if you think it makes your code clearer, though. Multiply a string by an integer:
###Code
"Echo..." * 3
###Output
_____no_output_____
###Markdown
Try to multiply a string by a whole number stored as a float type to confirm that you receive an error:
###Code
"Echo..." * 3.0
###Output
_____no_output_____
###Markdown
Standard Puzzles. What could go wrong with the presentation code? A user could input a string that can't be converted to an integer; a minimal way to guard against this is sketched in the next cell.
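###Code
# an added sketch, not part of the original template: guard the int() conversion
raw_apples = input("How many apples do you need to make a pie?")
try:
    apples = int(raw_apples)
    print(apples * 5, "cm diameter")
except ValueError:
    print("Please enter a whole number, e.g. 5")
###Output
_____no_output_____
###Markdown
Summation. Introduction. Create a list of numbers: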
###Code
numbers = [6, 3, 5, 8]
###Output
_____no_output_____
###Markdown
Calculate the sum of these numbers and store this as a variable, then print this value
###Code
total = sum(numbers)
print(total)
###Output
22
###Markdown
Standard Puzzles Create a list with the values 193, 452, 67, 891, and 10
###Code
nums = [193, 452, 67, 891, 10]
###Output
_____no_output_____
###Markdown
Find the sum of the items in the list and store it as a variable
###Code
total = sum(nums)
###Output
_____no_output_____
###Markdown
Use this variable to print the sum as part of a meaningful statement
###Code
print("The total of your numbers was", total)
###Output
The total of your numbers was 1613
###Markdown
Bonus Puzzles Confirm that lists can contain negative and decimal numbers and that `sum()` behaves as you would expect with these
###Code
more_nums = [4, 3.25, -1]
print(sum(more_nums))
###Output
6.25
###Markdown
Min & Max Introduction Create a list of numbers
###Code
heights = [177, 156, 151, 167, 149, 181, 172]
###Output
_____no_output_____
###Markdown
Calculate the minimum of this list and print it in a meaningful statement
###Code
shortest = min(heights)
print("The shortest person is",
shortest, "cm")
###Output
The shortest person is 149 cm
###Markdown
Repeat this for the maximum
###Code
tallest = max(heights)
print("The tallest person is",
tallest, "cm")
###Output
The tallest person is 181 cm
###Markdown
Standard Puzzles Create a list of numbers and find the minimum and maximum
###Code
my_nums = [5, 3, -4, 6, 8]
smallest = min(my_nums)
largest = max(my_nums)
###Output
_____no_output_____
###Markdown
Use these to find the range of the data and print this in a meaningful statement
###Code
spread = largest - smallest
print("The range of the data is", spread)
###Output
The range of the data is 12
###Markdown
Length Introduction Create a list of numbers and print the number of items it contains
###Code
numbers = [4, 5, 2, 6]
print(len(numbers))
###Output
4
###Markdown
Create a string and print out the number of characters it contains
###Code
sentence = "Isn't Python Great!"
print(len(sentence))
###Output
19
###Markdown
Standard Puzzles Ask the user for a string input
###Code
text = input("Say something...")
###Output
Say something...Go on then. How's this?
###Markdown
Output the length of this string
###Code
print("Your sentence was",
len(text),
"characters long!")
###Output
Your sentence was 23 characters long!
###Markdown
Bonus Puzzles Use the supplied code to let the user input the values of a list
###Code
raw = input("What values are in your list?\r\n")
numbers = list(map(int, raw.split(',')))
###Output
What values are in your list?
5, 7, 4, 9
###Markdown
Print the length of this list
###Code
print("You entered", len(numbers), "numbers")
###Output
You entered 4 numbers
|
notebooks/npfast/npfast_speed_benchmarks.ipynb | ###Markdown
speed comparison of npfast functions to native np functions
- for a description of each function tested, see regression_code.storm.npfast.tests.test_npfast.py
- for tests of precision, see regression_code.storm.npfast.tests.npfast_precision_benchmarks.py
- test parameters
    - each function is applied to each matrix 100 times
    - main loop tests each function 10 times in a row
    - main loop is run 10 times
    - tests performed on dmt01 (mkl, 8 core)
- test results:
    - absolute times use units of seconds
    - for sum and mean, npfast is slower than np for small matrices (< ~1000x1000)
    - for large matrices, all npfast operations are significantly faster than np
###Code
from __future__ import print_function
import time
import numexpr
import numpy as np
import scipy.stats
from regression_code.storm import npfast
from regression_code.storm.tests import test_datasets
from regression_code.storm.tests import test_utils
from regression_code.storm.npfast.tests import test_npfast
###Output
_____no_output_____
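###Markdown
The timing scheme described above (a 10-iteration outer loop around a 10-call inner loop, i.e. 100 calls per function/matrix pair) can be sketched with a minimal harness; this is an illustrative stand-in, not the actual implementation in test_npfast:
###Code
def time_function(f, matrix, n_outer=10, n_inner=10):
    """Return the mean per-call runtime of f(matrix) in seconds."""
    durations = []
    for _ in range(n_outer):
        start = time.time()
        for _ in range(n_inner):
            f(matrix)
        durations.append((time.time() - start) / n_inner)
    return np.mean(durations)
###Output
_____no_output_____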
###Markdown
create test data
###Code
test_matrices = test_datasets.create_random_single_matrices()
test_utils.rprint(test_matrices, 'test_matrices')
###Output
[test_matrices]: dict [3]
- 'big': list [3]
- ndarray (1000, 100000)
- ndarray (4000, 100000)
- ndarray (10000, 100000)
- 'medium': list [4]
- ndarray (250, 10000)
- ndarray (500, 10000)
- ndarray (1000, 10000)
- ndarray (2000, 10000)
- 'small': list [4]
- ndarray (100, 1000)
- ndarray (200, 1000)
- ndarray (400, 1000)
- ndarray (800, 1000)
###Markdown
sum
###Code
sum_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.sum_functions,
)
test_npfast.tabulate_time_results(
sum_results,
test_matrices,
test_npfast.sum_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
np.sum 3.8435e-05 6.2935e-05 0.00012215 0.00024306
thread_sum 0.0064512 0.007456 0.0063118 0.005515
dot_sum 0.0021286 2.4316e-05 4.0698e-05 5.5978e-05
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
np.sum 1.0 1.0 1.0 1.0
thread_sum 167.84 118.47 51.672 22.69
dot_sum 55.38 0.38637 0.33318 0.2303
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.sum 0.00088425 0.0029563 0.0047575 0.0088456
thread_sum 0.017091 0.020636 0.024149 0.024371
dot_sum 0.0014081 0.0016399 0.0018929 0.002377
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.sum 1.0 1.0 1.0 1.0
thread_sum 19.328 6.9805 5.076 2.7551
dot_sum 1.5924 0.55471 0.39787 0.26873
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
np.sum 0.047642 0.17869 0.45122
thread_sum 0.029529 0.069668 0.13193
dot_sum 0.01262 0.044622 0.10947
Relative Times
--------------
1000x100000 4000x100000 10000x100000
np.sum 1.0 1.0 1.0
thread_sum 0.61981 0.38989 0.29237
dot_sum 0.26488 0.24972 0.24262
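###Markdown
The `dot_sum` speedup on large matrices comes from re-expressing the reduction as a BLAS matrix-vector product, which MKL multithreads. A minimal sketch of the idea (an illustration, not the npfast implementation):
###Code
def dot_sum_sketch(matrix):
    # column sums of an (m, n) matrix, written as a ones-vector times matrix
    # product so the work is dispatched to the multithreaded BLAS dot kernel
    ones = np.ones(matrix.shape[0], dtype=matrix.dtype)
    return ones.dot(matrix)
###Output
_____no_output_____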
###Markdown
mean
###Code
mean_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.mean_functions,
)
test_npfast.tabulate_time_results(
mean_results,
test_matrices,
test_npfast.mean_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
np.mean 3.8512e-05 5.6632e-05 9.9292e-05 0.00017665
dot_mean 2.6317e-05 2.5761e-05 4.2188e-05 6.4394e-05
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
np.mean 1.0 1.0 1.0 1.0
dot_mean 0.68334 0.45489 0.42489 0.36452
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.mean 0.00092029 0.0020326 0.0042749 0.0080553
dot_mean 0.00041 0.0014811 0.0016804 0.0031063
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.mean 1.0 1.0 1.0 1.0
dot_mean 0.44551 0.7287 0.39308 0.38562
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
np.mean 0.046801 0.18504 0.46221
dot_mean 0.012181 0.046062 0.11421
Relative Times
--------------
1000x100000 4000x100000 10000x100000
np.mean 1.0 1.0 1.0
dot_mean 0.26028 0.24893 0.2471
###Markdown
std
###Code
std_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.std_functions,
)
test_npfast.tabulate_time_results(
std_results,
test_matrices,
test_npfast.std_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
np.std 0.00066489 0.00087406 0.0010557 0.001632
arithmetic_std 0.00052036 0.00071788 0.00098798 0.0017626
dot_std 0.00039404 0.00057585 0.0011543 0.00098377
dot_std_inplace 0.00042077 0.00040123 0.00067833 0.00098361
numexpr_std 0.00048243 0.00055937 0.0011042 0.0018497
dot_std_demeaned 0.00028901 0.00031125 0.00041497 0.00072337
dot_std_inplace_demeaned 0.00030629 0.00030851 0.00043909 0.00081434
numexpr_std_demeaned 0.00036021 0.00038046 0.00047732 0.00080855
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
np.std 1.0 1.0 1.0 1.0
arithmetic_std 0.78262 0.82131 0.93582 1.08
dot_std 0.59264 0.65882 1.0934 0.60279
dot_std_inplace 0.63284 0.45904 0.64252 0.60269
numexpr_std 0.72558 0.63997 1.0459 1.1334
dot_std_demeaned 0.43467 0.3561 0.39306 0.44323
dot_std_inplace_demeaned 0.46066 0.35297 0.41591 0.49897
numexpr_std_demeaned 0.54176 0.43528 0.45212 0.49542
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.std 0.0061365 0.012036 0.023922 0.043123
arithmetic_std 0.00651 0.015249 0.027053 0.051959
dot_std 0.0072078 0.0060937 0.014177 0.024191
dot_std_inplace 0.0041262 0.0056038 0.0099864 0.018251
numexpr_std 0.0034501 0.0058175 0.013887 0.023109
dot_std_demeaned 0.0030304 0.0072946 0.010978 0.022559
dot_std_inplace_demeaned 0.0027666 0.0042456 0.0080116 0.011792
numexpr_std_demeaned 0.0020223 0.0037367 0.010478 0.018753
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.std 1.0 1.0 1.0 1.0
arithmetic_std 1.0609 1.267 1.1309 1.2049
dot_std 1.1746 0.5063 0.59264 0.56097
dot_std_inplace 0.67241 0.46561 0.41746 0.42324
numexpr_std 0.56222 0.48336 0.5805 0.53589
dot_std_demeaned 0.49384 0.60609 0.45889 0.52314
dot_std_inplace_demeaned 0.45084 0.35275 0.33491 0.27346
numexpr_std_demeaned 0.32956 0.31047 0.43803 0.43488
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
np.std 0.23244 0.93619 2.433
arithmetic_std 0.26751 1.0651 2.8271
dot_std 0.11233 0.42895 1.0459
dot_std_inplace 0.082346 0.32396 0.77524
numexpr_std 0.099648 0.33871 0.69472
dot_std_demeaned 0.10274 0.39848 0.9497
dot_std_inplace_demeaned 0.054679 0.2258 0.54465
numexpr_std_demeaned 0.063208 0.21712 0.50009
Relative Times
--------------
1000x100000 4000x100000 10000x100000
np.std 1.0 1.0 1.0
arithmetic_std 1.1509 1.1377 1.162
dot_std 0.48328 0.45819 0.42986
dot_std_inplace 0.35427 0.34605 0.31863
numexpr_std 0.4287 0.3618 0.28554
dot_std_demeaned 0.44201 0.42564 0.39033
dot_std_inplace_demeaned 0.23524 0.24119 0.22386
numexpr_std_demeaned 0.27193 0.23191 0.20554
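###Markdown
A sketch of what a dot-based standard deviation presumably looks like: compute both moments with matrix-vector products and use Var[x] = E[x^2] - E[x]^2. The `_demeaned` rows above most likely benchmark inputs that are already mean-centered, which lets those variants skip the mean term entirely (names below are illustrative):
###Code
import numpy as np

def dot_std_sketch(x):
    # Row std from the first two moments, each obtained via a BLAS
    # matrix-vector product rather than a NumPy reduction.
    n = x.shape[1]
    ones = np.ones(n, dtype=x.dtype)
    mean = (x @ ones) / n
    mean_sq = ((x * x) @ ones) / n
    var = mean_sq - mean ** 2
    return np.sqrt(np.maximum(var, 0))  # clamp tiny negatives from roundoff
###Output
_____no_output_____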
###Markdown
zscore
###Code
zscore_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.zscore_functions,
)
test_npfast.tabulate_time_results(
zscore_results,
test_matrices,
test_npfast.zscore_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
scipy.zscore 0.00075774 0.00090483 0.0015254 0.0030792
np_arithmetic 0.00084754 0.00094026 0.0014719 0.0032721
dot 0.001005 0.00061288 0.0010645 0.0018892
dot_inplace 0.00038603 0.0005959 0.00089391 0.0017489
arithmetic_demeaned 0.00028709 0.00050362 0.0011099 0.0018977
dot_demeaned 0.00036931 0.00037931 0.00072983 0.0013024
dot_inplace_demeaned 0.00015157 0.00039869 0.00075939 0.0013243
numexpr 0.00089875 0.0027096 0.0016729 0.002867
numexpr_demeaned 0.00070606 0.00091008 0.0013744 0.0045334
numexpr_inplace 0.00084291 0.00095556 0.0019797 0.0043187
numexpr_inplace_demeaned 0.00065061 0.00086534 0.0012913 0.0026208
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
scipy.zscore 1.0 1.0 1.0 1.0
np_arithmetic 1.1185 1.0392 0.96488 1.0626
dot 1.3263 0.67734 0.69782 0.61351
dot_inplace 0.50945 0.65857 0.58601 0.56798
arithmetic_demeaned 0.37888 0.55659 0.72758 0.61628
dot_demeaned 0.48738 0.41921 0.47844 0.42295
dot_inplace_demeaned 0.20004 0.44063 0.49782 0.43008
numexpr 1.1861 2.9946 1.0967 0.93107
numexpr_demeaned 0.9318 1.0058 0.90099 1.4722
numexpr_inplace 1.1124 1.0561 1.2978 1.4025
numexpr_inplace_demeaned 0.85861 0.95636 0.84654 0.85112
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
scipy.zscore 0.0098424 0.023119 0.045117 0.082803
np_arithmetic 0.009507 0.021575 0.043083 0.085774
dot 0.0079812 0.016545 0.033453 0.066227
dot_inplace 0.0050822 0.011766 0.024402 0.045445
arithmetic_demeaned 0.0059418 0.014123 0.034143 0.059236
dot_demeaned 0.0082124 0.0083401 0.021608 0.044117
dot_inplace_demeaned 0.004291 0.0081265 0.017094 0.032314
numexpr 0.0093924 0.014889 0.034322 0.064482
numexpr_demeaned 0.010827 0.015699 0.030688 0.051205
numexpr_inplace 0.01377 0.017403 0.030368 0.055014
numexpr_inplace_demeaned 0.0091096 0.014312 0.027211 0.047696
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
scipy.zscore 1.0 1.0 1.0 1.0
np_arithmetic 0.96592 0.93319 0.95493 1.0359
dot 0.8109 0.71562 0.74146 0.79982
dot_inplace 0.51636 0.50892 0.54087 0.54883
arithmetic_demeaned 0.6037 0.61088 0.75676 0.71539
dot_demeaned 0.83439 0.36074 0.47894 0.53279
dot_inplace_demeaned 0.43597 0.3515 0.37887 0.39026
numexpr 0.95428 0.64401 0.76073 0.77875
numexpr_demeaned 1.1 0.67902 0.68019 0.6184
numexpr_inplace 1.3991 0.75273 0.6731 0.6644
numexpr_inplace_demeaned 0.92555 0.61904 0.60312 0.57602
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
scipy.zscore 0.4504 1.7784 4.3857
np_arithmetic 0.44146 1.7492 4.3522
dot 0.31592 1.1876 2.9178
dot_inplace 0.21912 0.84798 2.0589
arithmetic_demeaned 0.30818 1.2291 3.0316
dot_demeaned 0.20543 0.78664 1.9059
dot_inplace_demeaned 0.1558 0.58836 1.4405
numexpr 0.22592 0.72247 1.4992
numexpr_demeaned 0.18943 0.62192 1.2843
numexpr_inplace 0.22362 0.68795 1.4357
numexpr_inplace_demeaned 0.18543 0.60005 1.2234
Relative Times
--------------
1000x100000 4000x100000 10000x100000
scipy.zscore 1.0 1.0 1.0
np_arithmetic 0.98015 0.98357 0.99236
dot 0.70141 0.66777 0.6653
dot_inplace 0.4865 0.47681 0.46946
arithmetic_demeaned 0.68424 0.69113 0.69125
dot_demeaned 0.45609 0.44232 0.43456
dot_inplace_demeaned 0.34592 0.33083 0.32844
numexpr 0.5016 0.40624 0.34184
numexpr_demeaned 0.42058 0.3497 0.29284
numexpr_inplace 0.49649 0.38683 0.32736
numexpr_inplace_demeaned 0.41169 0.33741 0.27894
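###Markdown
The `inplace` z-score variants above presumably overwrite the input buffer instead of allocating matrix-sized temporaries for the centered and scaled values. A minimal sketch of that idea (illustrative name; assumes float input with nonzero row std):
###Code
import numpy as np

def zscore_inplace_sketch(x):
    # Z-score each row in place: the in-place -= and /= reuse x's buffer,
    # so no full-size intermediate arrays are created.
    mu = x.mean(axis=1, keepdims=True)
    sd = x.std(axis=1, keepdims=True)
    x -= mu
    x /= sd
    return x
###Output
_____no_output_____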
###Markdown
correlation
###Code
correlation_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.correlation_functions,
two_arg=True,
)
test_npfast.tabulate_time_results(
correlation_results,
test_matrices,
test_npfast.correlation_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
scipy_zscore 0.0012048 0.0015308 0.0031849 0.006504
arithmetic 0.00093101 0.0015468 0.0032616 0.0071509
dot 0.0015327 0.0012542 0.0020842 0.0069018
dot_inplace 0.0008838 0.0014453 0.0023018 0.0043241
numexpr 0.0013132 0.0014174 0.002359 0.0035828
arithmetic_1_zscored 0.00067337 0.0010723 0.0019043 0.0031947
inplace_1_zscored 0.00043566 0.00059172 0.0010318 0.0017822
arithmetic_2_zscored 8.1575e-05 0.00014355 0.00030577 0.00052166
inplace_2_zscored 0.00013853 0.0001413 0.00025191 0.00045202
numexpr_1_zscored 0.00056565 0.00073993 0.0010418 0.0021473
numexpr_2_zscored 0.00020582 0.00021449 0.00031353 0.00042427
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
scipy_zscore 1.0 1.0 1.0 1.0
arithmetic 0.77272 1.0105 1.0241 1.0995
dot 1.2722 0.81931 0.65441 1.0612
dot_inplace 0.73354 0.94413 0.72274 0.66484
numexpr 1.0899 0.92588 0.74067 0.55086
arithmetic_1_zscored 0.55889 0.70047 0.59791 0.49118
inplace_1_zscored 0.36159 0.38654 0.32396 0.27401
arithmetic_2_zscored 0.067706 0.093773 0.096006 0.080207
inplace_2_zscored 0.11498 0.092304 0.079096 0.069498
numexpr_1_zscored 0.46948 0.48336 0.3271 0.33015
numexpr_2_zscored 0.17083 0.14012 0.098442 0.065232
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
scipy_zscore 0.02172 0.048138 0.10242 0.19318
arithmetic 0.021466 0.046485 0.10211 0.19646
dot 0.015408 0.039869 0.078938 0.15099
dot_inplace 0.015548 0.026889 0.05596 0.10679
numexpr 0.018339 0.029698 0.057128 0.11126
arithmetic_1_zscored 0.013103 0.029858 0.060245 0.11152
inplace_1_zscored 0.0066994 0.014054 0.029201 0.057461
arithmetic_2_zscored 0.0023925 0.0061289 0.014819 0.023932
inplace_2_zscored 0.0018798 0.0034391 0.0059089 0.011995
numexpr_1_zscored 0.012154 0.016551 0.042922 0.072542
numexpr_2_zscored 0.0036772 0.0038526 0.010813 0.019243
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
scipy_zscore 1.0 1.0 1.0 1.0
arithmetic 0.9883 0.96566 0.99702 1.017
dot 0.70937 0.82821 0.77076 0.7816
dot_inplace 0.71586 0.55859 0.5464 0.55279
numexpr 0.84433 0.61694 0.5578 0.57596
arithmetic_1_zscored 0.60326 0.62024 0.58824 0.5773
inplace_1_zscored 0.30844 0.29196 0.28512 0.29745
arithmetic_2_zscored 0.11015 0.12732 0.1447 0.12388
inplace_2_zscored 0.086545 0.071441 0.057695 0.062092
numexpr_1_zscored 0.55955 0.34381 0.4191 0.37551
numexpr_2_zscored 0.1693 0.080032 0.10558 0.099611
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
scipy_zscore 1.0558 4.0844 10.386
arithmetic 1.0369 4.0905 10.148
dot 0.71526 2.6552 6.5385
dot_inplace 0.49945 1.9007 4.6411
numexpr 0.40702 1.2636 2.7126
arithmetic_1_zscored 0.59988 2.36 5.8146
inplace_1_zscored 0.28225 1.063 2.575
arithmetic_2_zscored 0.12373 0.4925 1.2249
inplace_2_zscored 0.054004 0.21985 0.54287
numexpr_1_zscored 0.26741 0.80514 1.7227
numexpr_2_zscored 0.067837 0.22327 0.49837
Relative Times
--------------
1000x100000 4000x100000 10000x100000
scipy_zscore 1.0 1.0 1.0
arithmetic 0.9821 1.0015 0.97705
dot 0.67745 0.65007 0.62953
dot_inplace 0.47305 0.46536 0.44684
numexpr 0.3855 0.30937 0.26117
arithmetic_1_zscored 0.56817 0.57781 0.55983
inplace_1_zscored 0.26733 0.26026 0.24792
arithmetic_2_zscored 0.11719 0.12058 0.11794
inplace_2_zscored 0.051149 0.053827 0.052268
numexpr_1_zscored 0.25328 0.19713 0.16586
numexpr_2_zscored 0.064251 0.054665 0.047983
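###Markdown
The `_2_zscored` rows are roughly an order of magnitude faster than everything else, which follows from the math: if both inputs are already z-scored row-wise (with the population std), the Pearson correlation of corresponding rows collapses to a mean of elementwise products. A sketch under that assumption (illustrative name):
###Code
import numpy as np

def correlation_2_zscored_sketch(zx, zy):
    # With zx and zy z-scored along axis 1, the per-row Pearson
    # correlation is simply the mean of the elementwise product.
    return (zx * zy).mean(axis=1)
###Output
_____no_output_____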
###Markdown
isnan
###Code
isnan_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.isnan_functions,
)
test_npfast.tabulate_time_results(
isnan_results,
test_matrices,
test_npfast.isnan_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
np.isnan 2.3212e-05 4.0796e-05 7.3686e-05 0.00014253
numexpr_isnan 0.00016331 0.00022569 0.00039826 0.00072577
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
np.isnan 1.0 1.0 1.0 1.0
numexpr_isnan 7.0354 5.5323 5.4048 5.0921
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.isnan 0.00073987 0.0018707 0.0036705 0.0073855
numexpr_isnan 0.0016185 0.0028047 0.0032556 0.0049234
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.isnan 1.0 1.0 1.0 1.0
numexpr_isnan 2.1875 1.4992 0.88696 0.66664
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
np.isnan 0.042363 0.16854 0.42016
numexpr_isnan 0.024852 0.087972 0.21252
Relative Times
--------------
1000x100000 4000x100000 10000x100000
np.isnan 1.0 1.0 1.0
numexpr_isnan 0.58664 0.52198 0.5058
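###Markdown
`numexpr` loses badly on small arrays, where thread startup and expression compilation dominate, and only wins once the matrix is large. The expression presumably exploits the fact that NaN is the only float that compares unequal to itself (illustrative name):
###Code
import numexpr as ne

def numexpr_isnan_sketch(x):
    # x != x is True exactly where x is NaN; numexpr evaluates the
    # comparison with multiple threads, which pays off on large arrays.
    return ne.evaluate("x != x")
###Output
_____no_output_____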
###Markdown
nan_to_num
###Code
nan_to_num_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.nan_to_num_functions,
)
test_npfast.tabulate_time_results(
nan_to_num_results,
test_matrices,
test_npfast.nan_to_num_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
np.nan_to_num 0.0013351 0.0019182 0.0036748 0.0071314
numexpr 0.00077827 0.00091107 0.0010442 0.001492
numexpr_inplace 0.00043334 0.00042697 0.00055427 0.00092661
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
np.nan_to_num 1.0 1.0 1.0 1.0
numexpr 0.58292 0.47495 0.28415 0.20921
numexpr_inplace 0.32457 0.22258 0.15083 0.12993
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.nan_to_num 0.022043 0.047442 0.096054 0.18179
numexpr 0.0033472 0.0048926 0.012641 0.020989
numexpr_inplace 0.0012488 0.0028838 0.0044437 0.0085549
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.nan_to_num 1.0 1.0 1.0 1.0
numexpr 0.15185 0.10313 0.1316 0.11546
numexpr_inplace 0.05665 0.060786 0.046262 0.047059
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
np.nan_to_num 0.9372 3.6531 9.0452
numexpr 0.078212 0.30102 0.75059
numexpr_inplace 0.039435 0.13153 0.31236
Relative Times
--------------
1000x100000 4000x100000 10000x100000
np.nan_to_num 1.0 1.0 1.0
numexpr 0.083453 0.082402 0.082982
numexpr_inplace 0.042077 0.036005 0.034533
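###Markdown
The `numexpr_inplace` variant above is presumably a fused where() that writes back into the input, avoiding both the temporaries and the output allocation that make `np.nan_to_num` slow. A sketch under that assumption; note it only zeroes NaNs, whereas `np.nan_to_num` also replaces infinities:
###Code
import numexpr as ne

def numexpr_nan_to_num_inplace_sketch(x):
    # NaN != NaN, so the condition selects NaNs; out=x overwrites the
    # input buffer instead of allocating a new result array.
    ne.evaluate("where(x != x, 0, x)", out=x)
    return x
###Output
_____no_output_____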
###Markdown
copy
###Code
copy_results = test_npfast.compute_time_results(
test_matrices,
test_npfast.copy_functions,
)
test_npfast.tabulate_time_results(
copy_results,
test_matrices,
test_npfast.copy_functions,
)
###Output
Matrix Set: small
=================
Absolute Times
--------------
100x1000 200x1000 400x1000 800x1000
np.copy 0.00012497 0.00014769 0.00026432 0.00041075
numexpr_copy 0.00039342 0.00053356 0.00061444 0.00073181
Relative Times
--------------
100x1000 200x1000 400x1000 800x1000
np.copy 1.0 1.0 1.0 1.0
numexpr_copy 3.148 3.6127 2.3246 1.7816
Matrix Set: medium
==================
Absolute Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.copy 0.0010633 0.0026036 0.010125 0.019197
numexpr_copy 0.0013504 0.0018839 0.0071575 0.010979
Relative Times
--------------
250x10000 500x10000 1000x10000 2000x10000
np.copy 1.0 1.0 1.0 1.0
numexpr_copy 1.27 0.72359 0.70692 0.57192
Matrix Set: big
===============
Absolute Times
--------------
1000x100000 4000x100000 10000x100000
np.copy 0.089182 0.35548 0.86961
numexpr_copy 0.035706 0.14248 0.35612
Relative Times
--------------
1000x100000 4000x100000 10000x100000
np.copy 1.0 1.0 1.0
numexpr_copy 0.40038 0.40081 0.40952
|
Euler 077 - Prime summations.ipynb | ###Markdown
Euler Problem 77================It is possible to write ten as the sum of primes in exactly five different ways: 7 + 3, 5 + 5, 5 + 3 + 2, 3 + 3 + 2 + 2, 2 + 2 + 2 + 2 + 2. What is the first value which can be written as the sum of primes in over five thousand different ways?
###Code
from sympy import sieve
N = 200
# s[k] counts the ways to write k as a sum of primes
s = [0]*N
s[0] = 1  # one way to make 0: the empty sum
for p in sieve.primerange(1, N):
    # fold in prime p; iterating k downward means every s[i] read on the
    # right-hand side still excludes p, so each multiset of primes is counted once
    for k in range(N-1, 0, -1):
        for i in range(k-p, -1, -p):
            s[k] += s[i]
# report the first value with more than 5000 prime summations
for k in range(100):
    if s[k] > 5000:
        print(k, s[k])
        break
###Output
71 5007
|
notebooks/old/DRL_single_stock_trading.ipynb | ###Markdown
Deep Reinforcement Learning for Stock Trading from Scratch: Single Stock TradingTutorials to use OpenAI DRL to trade a single stock in one Jupyter Notebook | Presented at NeurIPS 2020: Deep RL Workshop* This blog is based on our paper: FinRL: A Deep Reinforcement Learning Library for Automated Stock Trading in Quantitative Finance, presented at NeurIPS 2020: Deep RL Workshop.* Check out the Medium blog for detailed explanations: * Please report any issues to our Github: https://github.com/AI4Finance-LLC/FinRL-Library/issues Python Package InstallationAs a first step, we check whether the additional packages needed are present and install them if not. * Yahoo Finance API* pandas* matplotlib* stockstats* OpenAI gym* stable-baselines* tensorflow
###Code
import pkg_resources
import pip
installedPackages = {pkg.key for pkg in pkg_resources.working_set}
required = {'yfinance', 'pandas', 'matplotlib', 'stockstats','stable-baselines','gym','tensorflow'}
missing = required - installedPackages
# if any required package is missing, install the full set below
if missing:
!pip install yfinance
!pip install pandas
!pip install matplotlib
!pip install stockstats
!pip install gym
!pip install stable-baselines[mpi]
!pip install tensorflow==1.15.4
###Output
Collecting yfinance
Downloading https://files.pythonhosted.org/packages/7a/e8/b9d7104d3a4bf39924799067592d9e59119fcfc900a425a12e80a3123ec8/yfinance-0.1.55.tar.gz
Requirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.6/dist-packages (from yfinance) (1.1.2)
Requirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.6/dist-packages (from yfinance) (1.18.5)
Requirement already satisfied: requests>=2.20 in /usr/local/lib/python3.6/dist-packages (from yfinance) (2.23.0)
Requirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from yfinance) (0.0.9)
Collecting lxml>=4.5.1
[?25l Downloading https://files.pythonhosted.org/packages/64/28/0b761b64ecbd63d272ed0e7a6ae6e4402fc37886b59181bfdf274424d693/lxml-4.6.1-cp36-cp36m-manylinux1_x86_64.whl (5.5MB)
[K |████████████████████████████████| 5.5MB 3.6MB/s
[?25hRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2018.9)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2.8.1)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2.10)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (1.24.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2020.6.20)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.7.3->pandas>=0.24->yfinance) (1.15.0)
Building wheels for collected packages: yfinance
Building wheel for yfinance (setup.py) ... [?25l[?25hdone
Created wheel for yfinance: filename=yfinance-0.1.55-py2.py3-none-any.whl size=22618 sha256=7d89f66131cbb58f2c7444aa8bc8b6b9353689d8e78f8339f3f83724acd099bd
Stored in directory: /root/.cache/pip/wheels/04/98/cc/2702a4242d60bdc14f48b4557c427ded1fe92aedf257d4565c
Successfully built yfinance
Installing collected packages: lxml, yfinance
Found existing installation: lxml 4.2.6
Uninstalling lxml-4.2.6:
Successfully uninstalled lxml-4.2.6
Successfully installed lxml-4.6.1 yfinance-0.1.55
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (1.1.2)
Requirement already satisfied: numpy>=1.15.4 in /usr/local/lib/python3.6/dist-packages (from pandas) (1.18.5)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas) (2018.9)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas) (2.8.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (3.2.2)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.4.7)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (1.2.0)
Requirement already satisfied: numpy>=1.11 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (1.18.5)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.8.1)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->matplotlib) (1.15.0)
Collecting stockstats
Downloading https://files.pythonhosted.org/packages/32/41/d3828c5bc0a262cb3112a4024108a3b019c183fa3b3078bff34bf25abf91/stockstats-0.3.2-py2.py3-none-any.whl
Requirement already satisfied: pandas>=0.18.1 in /usr/local/lib/python3.6/dist-packages (from stockstats) (1.1.2)
Requirement already satisfied: numpy>=1.9.2 in /usr/local/lib/python3.6/dist-packages (from stockstats) (1.18.5)
Collecting int-date>=0.1.7
Downloading https://files.pythonhosted.org/packages/43/27/31803df15173ab341fe7548c14154b54227dfd8f630daa09a1c6e7db52f7/int_date-0.1.8-py2.py3-none-any.whl
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.18.1->stockstats) (2.8.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.18.1->stockstats) (2018.9)
Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from int-date>=0.1.7->stockstats) (1.15.0)
Installing collected packages: int-date, stockstats
Successfully installed int-date-0.1.8 stockstats-0.3.2
Requirement already satisfied: gym in /usr/local/lib/python3.6/dist-packages (0.17.2)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from gym) (1.4.1)
Requirement already satisfied: cloudpickle<1.4.0,>=1.2.0 in /usr/local/lib/python3.6/dist-packages (from gym) (1.3.0)
Requirement already satisfied: pyglet<=1.5.0,>=1.4.0 in /usr/local/lib/python3.6/dist-packages (from gym) (1.5.0)
Requirement already satisfied: numpy>=1.10.4 in /usr/local/lib/python3.6/dist-packages (from gym) (1.18.5)
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from pyglet<=1.5.0,>=1.4.0->gym) (0.16.0)
Collecting stable-baselines[mpi]
[?25l Downloading https://files.pythonhosted.org/packages/b0/48/d428b79bd4360727925f9fe34afeea7a9da381da3dc8748df834a349ad1d/stable_baselines-2.10.1-py3-none-any.whl (240kB)
[K |████████████████████████████████| 245kB 2.8MB/s
[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (3.2.2)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (0.16.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (1.18.5)
Requirement already satisfied: gym[atari,classic_control]>=0.11 in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (0.17.2)
Requirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (4.1.2.30)
Requirement already satisfied: cloudpickle>=0.5.5 in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (1.3.0)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (1.1.2)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from stable-baselines[mpi]) (1.4.1)
Collecting mpi4py; extra == "mpi"
[?25l Downloading https://files.pythonhosted.org/packages/ec/8f/bbd8de5ba566dd77e408d8136e2bab7fdf2b97ce06cab830ba8b50a2f588/mpi4py-3.0.3.tar.gz (1.4MB)
[K |████████████████████████████████| 1.4MB 8.7MB/s
[?25hRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->stable-baselines[mpi]) (0.10.0)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->stable-baselines[mpi]) (2.8.1)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->stable-baselines[mpi]) (2.4.7)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->stable-baselines[mpi]) (1.2.0)
Requirement already satisfied: pyglet<=1.5.0,>=1.4.0 in /usr/local/lib/python3.6/dist-packages (from gym[atari,classic_control]>=0.11->stable-baselines[mpi]) (1.5.0)
Requirement already satisfied: atari-py~=0.2.0; extra == "atari" in /usr/local/lib/python3.6/dist-packages (from gym[atari,classic_control]>=0.11->stable-baselines[mpi]) (0.2.6)
Requirement already satisfied: Pillow; extra == "atari" in /usr/local/lib/python3.6/dist-packages (from gym[atari,classic_control]>=0.11->stable-baselines[mpi]) (7.0.0)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas->stable-baselines[mpi]) (2018.9)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from cycler>=0.10->matplotlib->stable-baselines[mpi]) (1.15.0)
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from pyglet<=1.5.0,>=1.4.0->gym[atari,classic_control]>=0.11->stable-baselines[mpi]) (0.16.0)
Building wheels for collected packages: mpi4py
Building wheel for mpi4py (setup.py) ... [?25l[?25hdone
Created wheel for mpi4py: filename=mpi4py-3.0.3-cp36-cp36m-linux_x86_64.whl size=2074501 sha256=a07c9149b0e6ef809d06789949fd347417c7b2912b70d8440ca3e9cb5e3dd987
Stored in directory: /root/.cache/pip/wheels/18/e0/86/2b713dd512199096012ceca61429e12b960888de59818871d6
Successfully built mpi4py
Installing collected packages: mpi4py, stable-baselines
Successfully installed mpi4py-3.0.3 stable-baselines-2.10.1
Collecting tensorflow==1.15.4
[?25l Downloading https://files.pythonhosted.org/packages/8e/64/7a19837dd54d3f53b1ce5ae346ab401dde9678e8f233220317000bfdb3e2/tensorflow-1.15.4-cp36-cp36m-manylinux2010_x86_64.whl (110.5MB)
[K |████████████████████████████████| 110.5MB 1.4MB/s
[?25hCollecting keras-applications>=1.0.8
[?25l Downloading https://files.pythonhosted.org/packages/71/e3/19762fdfc62877ae9102edf6342d71b28fbfd9dea3d2f96a882ce099b03f/Keras_Applications-1.0.8-py3-none-any.whl (50kB)
[K |████████████████████████████████| 51kB 5.0MB/s
[?25hCollecting gast==0.2.2
Downloading https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz
Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (1.1.2)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (3.3.0)
Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (0.8.1)
Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (1.15.0)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (1.1.0)
Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (0.2.0)
Collecting tensorflow-estimator==1.15.1
[?25l Downloading https://files.pythonhosted.org/packages/de/62/2ee9cd74c9fa2fa450877847ba560b260f5d0fb70ee0595203082dafcc9d/tensorflow_estimator-1.15.1-py2.py3-none-any.whl (503kB)
[K |████████████████████████████████| 512kB 41.9MB/s
[?25hRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (0.10.0)
Requirement already satisfied: wheel>=0.26; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (0.35.1)
Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (3.12.4)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (1.32.0)
Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (1.12.1)
Requirement already satisfied: numpy<1.19.0,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow==1.15.4) (1.18.5)
Collecting tensorboard<1.16.0,>=1.15.0
[?25l Downloading https://files.pythonhosted.org/packages/1e/e9/d3d747a97f7188f48aa5eda486907f3b345cd409f0a0850468ba867db246/tensorboard-1.15.0-py3-none-any.whl (3.8MB)
[K |████████████████████████████████| 3.8MB 39.6MB/s
[?25hRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.8->tensorflow==1.15.4) (2.10.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf>=3.6.1->tensorflow==1.15.4) (50.3.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.4) (3.2.2)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.4) (1.0.1)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.4) (2.0.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15.4) (3.2.0)
Building wheels for collected packages: gast
Building wheel for gast (setup.py) ... [?25l[?25hdone
Created wheel for gast: filename=gast-0.2.2-cp36-none-any.whl size=7542 sha256=a55b678baf5d7fd287bc9ef15efbfa88a98fa4cd0b8b71a04f0bd501d4c5ea4d
Stored in directory: /root/.cache/pip/wheels/5c/2e/7e/a1d4d4fcebe6c381f378ce7743a3ced3699feb89bcfbdadadd
Successfully built gast
[31mERROR: tensorflow-probability 0.11.0 has requirement gast>=0.3.2, but you'll have gast 0.2.2 which is incompatible.[0m
Installing collected packages: keras-applications, gast, tensorflow-estimator, tensorboard, tensorflow
Found existing installation: gast 0.3.3
Uninstalling gast-0.3.3:
Successfully uninstalled gast-0.3.3
Found existing installation: tensorflow-estimator 2.3.0
Uninstalling tensorflow-estimator-2.3.0:
Successfully uninstalled tensorflow-estimator-2.3.0
Found existing installation: tensorboard 2.3.0
Uninstalling tensorboard-2.3.0:
Successfully uninstalled tensorboard-2.3.0
Found existing installation: tensorflow 2.3.0
Uninstalling tensorflow-2.3.0:
Successfully uninstalled tensorflow-2.3.0
Successfully installed gast-0.2.2 keras-applications-1.0.8 tensorboard-1.15.0 tensorflow-1.15.4 tensorflow-estimator-1.15.1
###Markdown
Import packages
###Code
import yfinance as yf
from stockstats import StockDataFrame as Sdf
import pandas as pd
import matplotlib.pyplot as plt
import gym
from stable_baselines import PPO2, DDPG, A2C, ACKTR, TD3, SAC
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.policies import MlpPolicy
# Disable warnings
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Part 1: Download DataYahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free.
###Code
# Download and save the data in a pandas DataFrame:
data_df = yf.download("AAPL", start="2009-01-01", end="2020-10-23")
data_df.shape
# reset the index, we want to use numbers instead of dates
data_df=data_df.reset_index()
data_df.head()
data_df.columns
# convert the column names to standardized names
data_df.columns = ['datadate','open','high','low','close','adjcp','volume']
# save the data to a csv file in your current folder
#data_df.to_csv('AAPL_2009_2020.csv')
###Output
_____no_output_____
###Markdown
Part 2: Preprocess DataData preprocessing is a crucial step for training a high quality machine learning model. We need to check for missing data and do feature engineering in order to convert the data into a model-ready state.
###Code
# check missing data
data_df.isnull().values.any()
# calculate technical indicators like MACD
stock = Sdf.retype(data_df.copy())
# we need to use adjusted close price instead of close price
stock['close'] = stock['adjcp']
data_df['macd'] = stock['macd']
# check missing data again
data_df.isnull().values.any()
data_df.head()
#data_df=data_df.fillna(method='bfill')
# Note that I always work on a copy of the original data so I can track each step.
data_clean = data_df.copy()
data_clean.head()
data_clean.tail()
###Output
_____no_output_____
###Markdown
Part 3: Design EnvironmentConsidering the stochastic and interactive nature of automated stock trading tasks, a financial task is modeled as a Markov Decision Process (MDP) problem. The training process involves observing the stock price change, taking an action, and calculating the reward, so that the agent adjusts its strategy accordingly. By interacting with the environment, the trading agent derives a trading strategy that maximizes rewards as time proceeds.Our trading environments, based on the OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation.The action space describes the allowed actions through which the agent interacts with the environment. Normally, the action space includes three actions: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. An action can also be carried out on multiple shares, so we use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" correspond to 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric.
###Code
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Global variables
HMAX_NORMALIZE = 200
INITIAL_ACCOUNT_BALANCE=100000
STOCK_DIM = 1
# transaction fee: 1/1000 reasonable percentage
TRANSACTION_FEE_PERCENT = 0.001
# REWARD_SCALING = 1e-3
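# Action convention used below: the policy emits a value in [-1, 1] and the
# environment scales it by HMAX_NORMALIZE into a signed share count
# (negative = sell, positive = buy). For example:
#   normalized_action = -0.25
#   shares = normalized_action * HMAX_NORMALIZE   # -50.0 -> sell up to 50 shares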
class SingleStockEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self, df,day = 0):
#super(StockEnv, self).__init__()
# date increment
self.day = day
self.df = df
# action_space normalization and the shape is STOCK_DIM
self.action_space = spaces.Box(low = -1, high = 1,shape = (STOCK_DIM,))
# Shape = 4: [Current Balance]+[prices]+[owned shares] +[macd]
self.observation_space = spaces.Box(low=0, high=np.inf, shape = (4,))
# load data from a pandas dataframe
self.data = self.df.loc[self.day,:]
# termination
self.terminal = False
# save the total number of trades
self.trades = 0
        # initialize state
self.state = [INITIAL_ACCOUNT_BALANCE] + \
[self.data.adjcp] + \
[0]*STOCK_DIM + \
[self.data.macd]
# initialize reward and cost
self.reward = 0
self.cost = 0
# memorize the total value, total rewards
self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
self.rewards_memory = []
    def _sell_stock(self, index, action):
        # perform sell action based on the sign of the action
        if self.state[index+STOCK_DIM+1] > 0:
            # number of shares actually sold, capped by current holdings
            sell_shares = min(abs(action), self.state[index+STOCK_DIM+1])
            # update balance
            self.state[0] += self.state[index+1] * sell_shares * \
                (1 - TRANSACTION_FEE_PERCENT)
            # update transaction costs before decrementing the holdings
            self.cost += self.state[index+1] * sell_shares * \
                TRANSACTION_FEE_PERCENT
            # update held shares
            self.state[index+STOCK_DIM+1] -= sell_shares
            self.trades += 1
        else:
            pass
def _buy_stock(self, index, action):
# perform buy action based on the sign of the action
available_amount = self.state[0] // self.state[index+1]
#update balance
self.state[0] -= self.state[index+1]*min(available_amount, action)* \
(1+ TRANSACTION_FEE_PERCENT)
# update held shares
self.state[index+STOCK_DIM+1] += min(available_amount, action)
# update transaction costs
self.cost+=self.state[index+1]*min(available_amount, action)* \
TRANSACTION_FEE_PERCENT
self.trades+=1
def step(self, actions):
self.terminal = self.day >= len(self.df.index.unique())-1
if self.terminal:
plt.plot(self.asset_memory,'r')
plt.savefig('account_value.png')
plt.close()
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))
print("previous_total_asset:{}".format(self.asset_memory[0]))
print("end_total_asset:{}".format(end_total_asset))
df_total_value = pd.DataFrame(self.asset_memory)
df_total_value.to_csv('account_value.csv')
print("total_reward:{}".format(self.state[0]+sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))- INITIAL_ACCOUNT_BALANCE ))
print("total_cost: ", self.cost)
print("total trades: ", self.trades)
df_total_value.columns = ['account_value']
df_total_value['daily_return']=df_total_value.pct_change(1)
if df_total_value['daily_return'].std()!=0:
sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
df_total_value['daily_return'].std()
print("Sharpe: ",sharpe)
df_rewards = pd.DataFrame(self.rewards_memory)
df_rewards.to_csv('account_rewards.csv')
return self.state, self.reward, self.terminal,{}
else:
# actions are the shares we need to buy, hold, or sell
actions = actions * HMAX_NORMALIZE
            # calculate beginning total asset
begin_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))
# perform buy or sell action
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]
for index in sell_index:
                # print('take sell action: {}'.format(actions[index]))
self._sell_stock(index, actions[index])
for index in buy_index:
# print('take buy action: {}'.format(actions[index]))
self._buy_stock(index, actions[index])
# update data, walk a step s'
self.day += 1
self.data = self.df.loc[self.day,:]
#load next state
self.state = [self.state[0]] + \
[self.data.adjcp] + \
list(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]) +\
[self.data.macd]
# calculate the end total asset
end_total_asset = self.state[0]+ \
sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))
self.reward = end_total_asset - begin_total_asset
self.rewards_memory.append(self.reward)
#self.reward = self.reward * REWARD_SCALING
self.asset_memory.append(end_total_asset)
return self.state, self.reward, self.terminal, {}
def reset(self):
self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
self.day = 0
self.data = self.df.loc[self.day,:]
self.cost = 0
self.trades = 0
self.terminal = False
self.rewards_memory = []
        # initialize state
self.state = [INITIAL_ACCOUNT_BALANCE] + \
[self.data.adjcp] + \
[0]*STOCK_DIM + \
[self.data.macd]
return self.state
def render(self, mode='human'):
return self.state
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
###Output
_____no_output_____
###Markdown
Part 4: Implement DRL AlgorithmsThe implementation of the DRL algorithms is based on OpenAI Baselines and Stable Baselines. Stable Baselines is a fork of OpenAI Baselines, with major structural refactoring and code cleanups. Training data: 2009-01-01 to 2018-12-31
###Code
train = data_clean[(data_clean.datadate>='2009-01-01') & (data_clean.datadate<'2019-01-01')]
# the index needs to start from 0
train=train.reset_index(drop=True)
train.head()
###Output
_____no_output_____
###Markdown
Model Training: 4 models, PPO, A2C, DDPG, TD3 Model 1: PPO
###Code
#tensorboard --logdir ./single_stock_tensorboard/
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ppo = PPO2('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_ppo.learn(total_timesteps=100000,tb_log_name="run_aapl_ppo")
#model.save('AAPL_ppo_100k')
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/tf_util.py:191: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/tf_util.py:200: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/policies.py:116: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/input.py:25: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/policies.py:561: flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.
Instructions for updating:
Use keras.layers.flatten instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/layers/core.py:332: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
Please use `layer.__call__` method instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/tf_layers.py:123: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/distributions.py:418: The name tf.random_normal is deprecated. Please use tf.random.normal instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/ppo2/ppo2.py:190: The name tf.summary.scalar is deprecated. Please use tf.compat.v1.summary.scalar instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/ppo2/ppo2.py:198: The name tf.trainable_variables is deprecated. Please use tf.compat.v1.trainable_variables instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/ppo2/ppo2.py:206: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/ppo2/ppo2.py:240: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/ppo2/ppo2.py:242: The name tf.summary.merge_all is deprecated. Please use tf.compat.v1.summary.merge_all instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/base_class.py:1169: The name tf.summary.FileWriter is deprecated. Please use tf.compat.v1.summary.FileWriter instead.
previous_total_asset:100000
end_total_asset:156098.18901026587
total_reward:56098.189010265865
total_cost: 6341.287377798898
total trades: 2491
Sharpe: 0.4187558739862496
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/common/tf_util.py:502: The name tf.Summary is deprecated. Please use tf.compat.v1.Summary instead.
previous_total_asset:100000
end_total_asset:94446.32340155944
total_reward:-5553.676598440565
total_cost: 2929.111106588759
total trades: 1887
Sharpe: -0.4449912464199369
previous_total_asset:100000
end_total_asset:106926.53386862631
total_reward:6926.533868626313
total_cost: 5599.619928583559
total trades: 2210
Sharpe: 0.13181291830866868
previous_total_asset:100000
end_total_asset:210752.72119495703
total_reward:110752.72119495703
total_cost: 6167.86721222417
total trades: 2436
Sharpe: 0.5912451258666666
previous_total_asset:100000
end_total_asset:266216.10048735235
total_reward:166216.10048735235
total_cost: 6268.2672412182
total trades: 2506
Sharpe: 0.6394795415623377
previous_total_asset:100000
end_total_asset:553507.7849352992
total_reward:453507.7849352992
total_cost: 6205.074570567108
total trades: 2514
Sharpe: 0.869382821594984
previous_total_asset:100000
end_total_asset:525815.480627327
total_reward:425815.480627327
total_cost: 6268.937668332546
total trades: 2515
Sharpe: 0.8563922247597354
previous_total_asset:100000
end_total_asset:579948.3273711266
total_reward:479948.3273711266
total_cost: 6264.915418211092
total trades: 2515
Sharpe: 0.8812822887651185
previous_total_asset:100000
end_total_asset:601018.8610150499
total_reward:501018.86101504986
total_cost: 6071.15111291963
total trades: 2514
Sharpe: 0.9042371048628427
previous_total_asset:100000
end_total_asset:501034.1646295194
total_reward:401034.1646295194
total_cost: 6162.337433171433
total trades: 2508
Sharpe: 0.830281620872439
previous_total_asset:100000
end_total_asset:457905.0789816795
total_reward:357905.0789816795
total_cost: 6250.532500542996
total trades: 2514
Sharpe: 0.8257661702659089
previous_total_asset:100000
end_total_asset:265638.6391078079
total_reward:165638.6391078079
total_cost: 6734.51020678946
total trades: 2509
Sharpe: 0.7966543085095101
previous_total_asset:100000
end_total_asset:176177.60613806313
total_reward:76177.60613806313
total_cost: 5210.800510190631
total trades: 2372
Sharpe: 0.6105657641565995
previous_total_asset:100000
end_total_asset:119068.00619850191
total_reward:19068.006198501913
total_cost: 5449.554538212252
total trades: 2340
Sharpe: 0.4782932891593345
previous_total_asset:100000
end_total_asset:212956.67157340347
total_reward:112956.67157340347
total_cost: 6368.901746125352
total trades: 2475
Sharpe: 0.6871472400668093
previous_total_asset:100000
end_total_asset:395805.73324403726
total_reward:295805.73324403726
total_cost: 6537.4196105855235
total trades: 2511
Sharpe: 0.8052338342289653
previous_total_asset:100000
end_total_asset:604812.5459749429
total_reward:504812.54597494286
total_cost: 6218.48193137824
total trades: 2513
Sharpe: 0.9101661298526685
previous_total_asset:100000
end_total_asset:582116.5546294623
total_reward:482116.5546294623
total_cost: 6211.65512132105
total trades: 2515
Sharpe: 0.9192984163452886
previous_total_asset:100000
end_total_asset:526174.6858831241
total_reward:426174.6858831241
total_cost: 6416.375233592592
total trades: 2515
Sharpe: 0.8856642983091633
previous_total_asset:100000
end_total_asset:553836.0514161409
total_reward:453836.05141614086
total_cost: 6567.770799179734
total trades: 2515
Sharpe: 0.8855059161955452
previous_total_asset:100000
end_total_asset:616036.9849392112
total_reward:516036.98493921116
total_cost: 6652.36958320929
total trades: 2512
Sharpe: 0.9329275065056303
previous_total_asset:100000
end_total_asset:568439.2268575617
total_reward:468439.22685756173
total_cost: 6487.975412838857
total trades: 2515
Sharpe: 0.9065578971932241
previous_total_asset:100000
end_total_asset:541432.7291863837
total_reward:441432.72918638366
total_cost: 6154.1131312864945
total trades: 2515
Sharpe: 0.875571568948455
previous_total_asset:100000
end_total_asset:769749.4838137159
total_reward:669749.4838137159
total_cost: 5731.190934403671
total trades: 2515
Sharpe: 0.9634334035816738
previous_total_asset:100000
end_total_asset:746253.3798512011
total_reward:646253.3798512011
total_cost: 5540.084347681433
total trades: 2514
Sharpe: 0.9554523241222211
previous_total_asset:100000
end_total_asset:702178.8201897977
total_reward:602178.8201897977
total_cost: 5961.959511756423
total trades: 2515
Sharpe: 0.9397244292873406
previous_total_asset:100000
end_total_asset:741096.6667565618
total_reward:641096.6667565618
total_cost: 5986.884645727058
total trades: 2514
Sharpe: 0.9557487033016611
previous_total_asset:100000
end_total_asset:729812.9256538266
total_reward:629812.9256538266
total_cost: 6011.705168113822
total trades: 2513
Sharpe: 0.9538379004377312
previous_total_asset:100000
end_total_asset:758236.5753620398
total_reward:658236.5753620398
total_cost: 6181.802640711599
total trades: 2513
Sharpe: 0.9636495498888424
previous_total_asset:100000
end_total_asset:772840.3734990709
total_reward:672840.3734990709
total_cost: 5596.33548413703
total trades: 2512
Sharpe: 0.9681238454395429
previous_total_asset:100000
end_total_asset:741677.1106373993
total_reward:641677.1106373993
total_cost: 5955.182698275548
total trades: 2514
Sharpe: 0.9595399727490882
previous_total_asset:100000
end_total_asset:764805.0461015248
total_reward:664805.0461015248
total_cost: 5891.292651756751
total trades: 2515
Sharpe: 0.9661904649783544
previous_total_asset:100000
end_total_asset:789673.3483401105
total_reward:689673.3483401105
total_cost: 5926.372793171938
total trades: 2515
Sharpe: 0.9741742994885249
previous_total_asset:100000
end_total_asset:741254.787990579
total_reward:641254.787990579
total_cost: 5821.799027481282
total trades: 2515
Sharpe: 0.9576394615424657
previous_total_asset:100000
end_total_asset:731611.5428951988
total_reward:631611.5428951988
total_cost: 6097.654599630185
total trades: 2515
Sharpe: 0.9514027303396922
previous_total_asset:100000
end_total_asset:724101.1807242595
total_reward:624101.1807242595
total_cost: 6112.808160692618
total trades: 2513
Sharpe: 0.9468299402074484
previous_total_asset:100000
end_total_asset:759113.1677076282
total_reward:659113.1677076282
total_cost: 6216.473084561016
total trades: 2515
Sharpe: 0.9682276096737484
previous_total_asset:100000
end_total_asset:740835.2222805698
total_reward:640835.2222805698
total_cost: 5994.8131451707895
total trades: 2515
Sharpe: 0.9614725290080312
previous_total_asset:100000
end_total_asset:757197.6833960349
total_reward:657197.6833960349
total_cost: 5919.665946050203
total trades: 2515
Sharpe: 0.964978933627224
###Markdown
Model 2: DDPG
###Code
#tensorboard --logdir ./single_stock_tensorboard/
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ddpg = DDPG('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_ddpg.learn(total_timesteps=100000, tb_log_name="run_aapl_ddpg")
#model.save('AAPL_ddpg_50k')
###Output
previous_total_asset:100000
end_total_asset:381113.46135500714
total_reward:281113.46135500714
total_cost: 99.90638327732879
total trades: 2250
Sharpe: 0.7026063856823678
previous_total_asset:100000
end_total_asset:1080609.6161442325
total_reward:980609.6161442325
total_cost: 99.8974009731182
total trades: 2515
Sharpe: 1.0705602895308748
previous_total_asset:100000
end_total_asset:1080615.5633120066
total_reward:980615.5633120066
total_cost: 99.89794435650313
total trades: 2515
Sharpe: 1.0705613143272277
previous_total_asset:100000
end_total_asset:1080615.5633120066
total_reward:980615.5633120066
total_cost: 99.89794435650313
total trades: 2515
Sharpe: 1.0705613143272277
previous_total_asset:100000
end_total_asset:1080615.5633120066
total_reward:980615.5633120066
total_cost: 99.89794435650313
total trades: 2515
Sharpe: 1.0705613143272277
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
###Markdown
Model 3: A2C
###Code
#tensorboard --logdir ./single_stock_tensorboard/
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_a2c = A2C('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_a2c.learn(total_timesteps=100000,tb_log_name="run_aapl_a2c")
#model.save('AAPL_a2c_50k')
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/stable_baselines/a2c/a2c.py:184: The name tf.train.RMSPropOptimizer is deprecated. Please use tf.compat.v1.train.RMSPropOptimizer instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/training/rmsprop.py:119: calling Ones.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
previous_total_asset:100000
end_total_asset:143568.45233873036
total_reward:43568.45233873036
total_cost: 6387.934081530226
total trades: 2456
Sharpe: 0.4585063869500929
previous_total_asset:100000
end_total_asset:471519.15020942007
total_reward:371519.15020942007
total_cost: 3212.7233202738003
total trades: 2510
Sharpe: 0.8100652990954699
previous_total_asset:100000
end_total_asset:711997.8256528199
total_reward:611997.8256528199
total_cost: 1962.9036105102316
total trades: 2509
Sharpe: 0.9281065879650406
previous_total_asset:100000
end_total_asset:813730.9830055628
total_reward:713730.9830055628
total_cost: 5242.129027291222
total trades: 2515
Sharpe: 0.9784477406469194
previous_total_asset:100000
end_total_asset:763906.3490314147
total_reward:663906.3490314147
total_cost: 4817.21023018733
total trades: 2513
Sharpe: 0.9614375402950348
previous_total_asset:100000
end_total_asset:666065.8786650962
total_reward:566065.8786650962
total_cost: 4676.52369838125
total trades: 2515
Sharpe: 0.9251937168254919
previous_total_asset:100000
end_total_asset:828059.3046764581
total_reward:728059.3046764581
total_cost: 2756.847100758158
total trades: 2515
Sharpe: 0.9794847255543758
previous_total_asset:100000
end_total_asset:820407.5433832017
total_reward:720407.5433832017
total_cost: 3434.9401856560908
total trades: 2515
Sharpe: 0.9770267066894319
previous_total_asset:100000
end_total_asset:767188.7455583132
total_reward:667188.7455583132
total_cost: 3297.0731466450547
total trades: 2515
Sharpe: 0.9538168460285256
previous_total_asset:100000
end_total_asset:778635.5451063968
total_reward:678635.5451063968
total_cost: 4326.622281863198
total trades: 2514
Sharpe: 0.9661988894055867
previous_total_asset:100000
end_total_asset:271013.7760203545
total_reward:171013.77602035447
total_cost: 6035.33971987824
total trades: 2511
Sharpe: 0.6001095053202498
previous_total_asset:100000
end_total_asset:693888.6297474115
total_reward:593888.6297474115
total_cost: 6725.654488365585
total trades: 2512
Sharpe: 0.9477727396021005
previous_total_asset:100000
end_total_asset:695857.6520784842
total_reward:595857.6520784842
total_cost: 6559.052958980748
total trades: 2515
Sharpe: 0.9570767009582097
previous_total_asset:100000
end_total_asset:665060.478724533
total_reward:565060.478724533
total_cost: 6428.776309423134
total trades: 2513
Sharpe: 1.0284509105263875
previous_total_asset:100000
end_total_asset:304313.7964066868
total_reward:204313.79640668677
total_cost: 6560.517864200142
total trades: 2463
Sharpe: 0.685670350587845
previous_total_asset:100000
end_total_asset:556665.7202818475
total_reward:456665.7202818475
total_cost: 6514.5848537590755
total trades: 2515
Sharpe: 0.918155329470793
previous_total_asset:100000
end_total_asset:786268.58362437
total_reward:686268.58362437
total_cost: 6518.325190759874
total trades: 2515
Sharpe: 1.0064220446414565
previous_total_asset:100000
end_total_asset:821071.183253775
total_reward:721071.183253775
total_cost: 6503.402347123818
total trades: 2515
Sharpe: 1.0411410205557572
previous_total_asset:100000
end_total_asset:925445.9804322764
total_reward:825445.9804322764
total_cost: 6784.712702513881
total trades: 2515
Sharpe: 1.0404912926059942
previous_total_asset:100000
end_total_asset:962517.7903138469
total_reward:862517.7903138469
total_cost: 5193.491431148093
total trades: 2515
Sharpe: 1.0444274794914112
previous_total_asset:100000
end_total_asset:972332.4928350974
total_reward:872332.4928350974
total_cost: 6897.381129094436
total trades: 2515
Sharpe: 1.0570876131621412
previous_total_asset:100000
end_total_asset:945060.4014321667
total_reward:845060.4014321667
total_cost: 6735.608970433232
total trades: 2515
Sharpe: 1.0487443970392183
previous_total_asset:100000
end_total_asset:918670.8822703442
total_reward:818670.8822703442
total_cost: 6680.795828343729
total trades: 2515
Sharpe: 1.0403145093565656
previous_total_asset:100000
end_total_asset:932346.3690885586
total_reward:832346.3690885586
total_cost: 6617.01394678098
total trades: 2515
Sharpe: 1.047549236579285
previous_total_asset:100000
end_total_asset:929525.1868265397
total_reward:829525.1868265397
total_cost: 7027.402624023227
total trades: 2515
Sharpe: 1.0488654810492868
previous_total_asset:100000
end_total_asset:908426.3718980909
total_reward:808426.3718980909
total_cost: 6743.710549693906
total trades: 2515
Sharpe: 1.0533517402923307
previous_total_asset:100000
end_total_asset:837756.2032604109
total_reward:737756.2032604109
total_cost: 7696.352012277036
total trades: 2515
Sharpe: 1.0367535626224442
previous_total_asset:100000
end_total_asset:863230.8268439324
total_reward:763230.8268439324
total_cost: 7522.8635206924
total trades: 2515
Sharpe: 1.024741392144145
previous_total_asset:100000
end_total_asset:1068293.4052346733
total_reward:968293.4052346733
total_cost: 3950.7904742669434
total trades: 2515
Sharpe: 1.0696609048755705
previous_total_asset:100000
end_total_asset:1024396.5737258486
total_reward:924396.5737258486
total_cost: 7377.394023182965
total trades: 2515
Sharpe: 1.0612899463024488
previous_total_asset:100000
end_total_asset:985937.795078817
total_reward:885937.795078817
total_cost: 7331.481581167244
total trades: 2515
Sharpe: 1.0531098337914353
previous_total_asset:100000
end_total_asset:1039037.4584408774
total_reward:939037.4584408774
total_cost: 7189.234271772797
total trades: 2515
Sharpe: 1.060455776955667
previous_total_asset:100000
end_total_asset:1063517.41348354
total_reward:963517.4134835401
total_cost: 4901.12351607559
total trades: 2515
Sharpe: 1.0676215370687918
previous_total_asset:100000
end_total_asset:1031403.3840595726
total_reward:931403.3840595726
total_cost: 7692.226820285462
total trades: 2515
Sharpe: 1.0678040027978382
previous_total_asset:100000
end_total_asset:961527.0898816795
total_reward:861527.0898816795
total_cost: 7209.059959788929
total trades: 2515
Sharpe: 1.0537933358617513
previous_total_asset:100000
end_total_asset:1048865.3337775513
total_reward:948865.3337775513
total_cost: 3580.3174852377883
total trades: 2515
Sharpe: 1.0628302218014327
previous_total_asset:100000
end_total_asset:1072529.5638042036
total_reward:972529.5638042036
total_cost: 1634.1296899727581
total trades: 2515
Sharpe: 1.0683705166511808
previous_total_asset:100000
end_total_asset:1033626.8291941078
total_reward:933626.8291941078
total_cost: 6125.992164563634
total trades: 2515
Sharpe: 1.067705030388622
previous_total_asset:100000
end_total_asset:821092.0955071684
total_reward:721092.0955071684
total_cost: 5975.377095007534
total trades: 2515
Sharpe: 1.0242724391117501
###Markdown
Model 4: TD3
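TD3 (Twin Delayed DDPG) mitigates DDPG's value overestimation with twin critics, delayed policy updates, and target policy smoothing. The cell below trains with the stable-baselines defaults; a variant with explicit Gaussian exploration noise on the actions (a sketch using stable-baselines' `NormalActionNoise`; the 0.1 sigma is an illustrative choice, not a tuned value) would look like:

```python
import numpy as np
from stable_baselines.common.noise import NormalActionNoise

env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
# Gaussian exploration noise sized to the action space
n_actions = env_train.action_space.shape[-1]
action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions))
model_td3 = TD3('MlpPolicy', env_train, action_noise=action_noise,
                tensorboard_log="./single_stock_trading_2_tensorboard/")
```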
###Code
#tensorboard --logdir ./single_stock_trading_2_tensorboard/
# Rough capability ordering on this task: DQN < DDPG < TD3
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_td3 = TD3('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_td3.learn(total_timesteps=100000,tb_log_name="run_aapl_td3")
#model.save('AAPL_td3_50k')
###Output
previous_total_asset:100000
end_total_asset:778869.7048272111
total_reward:678869.7048272111
total_cost: 124.57847519610326
total trades: 2504
Sharpe: 0.9529387392421051
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
###Markdown
Testing data
###Code
test = data_clean[(data_clean.datadate>='2019-01-01') ]
# the index needs to start from 0
test=test.reset_index(drop=True)
test.head()
###Output
_____no_output_____
###Markdown
Trading
Assume that we have $100,000 of initial capital on 2019-01-01. We use the trained TD3 model to trade AAPL.
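The training cells above leave `model.save(...)` commented out. If you want to persist the agent between sessions, a minimal save/load round trip with stable-baselines (a sketch; the file name is illustrative) is:

```python
model_td3.save('AAPL_td3_100k')   # illustrative file name
model = TD3.load('AAPL_td3_100k')
```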
###Code
model = model_td3
env_test = DummyVecEnv([lambda: SingleStockEnv(test)])
obs_test = env_test.reset()
print("==============Model Prediction===========")
for i in range(len(test.index.unique())):
action, _states = model.predict(obs_test)
obs_test, rewards, dones, info = env_test.step(action)
env_test.render()
###Output
==============Model Prediction===========
previous_total_asset:100000
end_total_asset:310696.8490824127
total_reward:210696.84908241272
total_cost: 99.87627464294434
total trades: 456
Sharpe: 1.864143095949389
###Markdown
Part 5: Backtest Our Strategy
For simplicity, we just calculate the Sharpe ratio and the annual return manually.
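The two metrics computed below follow these formulas, where $\bar{r}$ and $\sigma_r$ are the mean and standard deviation of the daily return series and 252 is the number of trading days per year:

$$\text{Sharpe} = \sqrt{252}\,\frac{\bar{r}}{\sigma_r}, \qquad \text{annual return} = \left((1+\bar{r})^{252}-1\right)\times 100\%$$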
###Code
def get_DRL_sharpe():
df_total_value=pd.read_csv('account_value.csv',index_col=0)
df_total_value.columns = ['account_value']
df_total_value['daily_return']=df_total_value.pct_change(1)
sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
df_total_value['daily_return'].std()
annual_return = ((df_total_value['daily_return'].mean()+1)**252-1)*100
print("annual return: ", annual_return)
print("sharpe ratio: ", sharpe)
return df_total_value
def get_buy_and_hold_sharpe(test):
test['daily_return']=test['adjcp'].pct_change(1)
sharpe = (252**0.5)*test['daily_return'].mean()/ \
test['daily_return'].std()
annual_return = ((test['daily_return'].mean()+1)**252-1)*100
print("annual return: ", annual_return)
print("sharpe ratio: ", sharpe)
#return sharpe
df_total_value=get_DRL_sharpe()
get_buy_and_hold_sharpe(test)
DRL_cumulative_return = (df_total_value.account_value.pct_change(1)+1).cumprod()-1
buy_and_hold_cumulative_return = (test.adjcp.pct_change(1)+1).cumprod()-1
%matplotlib inline
fig, ax = plt.subplots(figsize=(12, 8))
plt.plot(test.datadate, DRL_cumulative_return, color='red',label = "DRL")
plt.plot(test.datadate, buy_and_hold_cumulative_return, label = "Buy & Hold")
plt.title("Cumulative Return for AAPL with Transaction Cost",size= 18)
plt.legend()
plt.rc('legend',fontsize=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
###Output
_____no_output_____
total trades: 2515
Sharpe: 0.918155329470793
previous_total_asset:100000
end_total_asset:786268.58362437
total_reward:686268.58362437
total_cost: 6518.325190759874
total trades: 2515
Sharpe: 1.0064220446414565
previous_total_asset:100000
end_total_asset:821071.183253775
total_reward:721071.183253775
total_cost: 6503.402347123818
total trades: 2515
Sharpe: 1.0411410205557572
previous_total_asset:100000
end_total_asset:925445.9804322764
total_reward:825445.9804322764
total_cost: 6784.712702513881
total trades: 2515
Sharpe: 1.0404912926059942
previous_total_asset:100000
end_total_asset:962517.7903138469
total_reward:862517.7903138469
total_cost: 5193.491431148093
total trades: 2515
Sharpe: 1.0444274794914112
previous_total_asset:100000
end_total_asset:972332.4928350974
total_reward:872332.4928350974
total_cost: 6897.381129094436
total trades: 2515
Sharpe: 1.0570876131621412
previous_total_asset:100000
end_total_asset:945060.4014321667
total_reward:845060.4014321667
total_cost: 6735.608970433232
total trades: 2515
Sharpe: 1.0487443970392183
previous_total_asset:100000
end_total_asset:918670.8822703442
total_reward:818670.8822703442
total_cost: 6680.795828343729
total trades: 2515
Sharpe: 1.0403145093565656
previous_total_asset:100000
end_total_asset:932346.3690885586
total_reward:832346.3690885586
total_cost: 6617.01394678098
total trades: 2515
Sharpe: 1.047549236579285
previous_total_asset:100000
end_total_asset:929525.1868265397
total_reward:829525.1868265397
total_cost: 7027.402624023227
total trades: 2515
Sharpe: 1.0488654810492868
previous_total_asset:100000
end_total_asset:908426.3718980909
total_reward:808426.3718980909
total_cost: 6743.710549693906
total trades: 2515
Sharpe: 1.0533517402923307
previous_total_asset:100000
end_total_asset:837756.2032604109
total_reward:737756.2032604109
total_cost: 7696.352012277036
total trades: 2515
Sharpe: 1.0367535626224442
previous_total_asset:100000
end_total_asset:863230.8268439324
total_reward:763230.8268439324
total_cost: 7522.8635206924
total trades: 2515
Sharpe: 1.024741392144145
previous_total_asset:100000
end_total_asset:1068293.4052346733
total_reward:968293.4052346733
total_cost: 3950.7904742669434
total trades: 2515
Sharpe: 1.0696609048755705
previous_total_asset:100000
end_total_asset:1024396.5737258486
total_reward:924396.5737258486
total_cost: 7377.394023182965
total trades: 2515
Sharpe: 1.0612899463024488
previous_total_asset:100000
end_total_asset:985937.795078817
total_reward:885937.795078817
total_cost: 7331.481581167244
total trades: 2515
Sharpe: 1.0531098337914353
previous_total_asset:100000
end_total_asset:1039037.4584408774
total_reward:939037.4584408774
total_cost: 7189.234271772797
total trades: 2515
Sharpe: 1.060455776955667
previous_total_asset:100000
end_total_asset:1063517.41348354
total_reward:963517.4134835401
total_cost: 4901.12351607559
total trades: 2515
Sharpe: 1.0676215370687918
previous_total_asset:100000
end_total_asset:1031403.3840595726
total_reward:931403.3840595726
total_cost: 7692.226820285462
total trades: 2515
Sharpe: 1.0678040027978382
previous_total_asset:100000
end_total_asset:961527.0898816795
total_reward:861527.0898816795
total_cost: 7209.059959788929
total trades: 2515
Sharpe: 1.0537933358617513
previous_total_asset:100000
end_total_asset:1048865.3337775513
total_reward:948865.3337775513
total_cost: 3580.3174852377883
total trades: 2515
Sharpe: 1.0628302218014327
previous_total_asset:100000
end_total_asset:1072529.5638042036
total_reward:972529.5638042036
total_cost: 1634.1296899727581
total trades: 2515
Sharpe: 1.0683705166511808
previous_total_asset:100000
end_total_asset:1033626.8291941078
total_reward:933626.8291941078
total_cost: 6125.992164563634
total trades: 2515
Sharpe: 1.067705030388622
previous_total_asset:100000
end_total_asset:821092.0955071684
total_reward:721092.0955071684
total_cost: 5975.377095007534
total trades: 2515
Sharpe: 1.0242724391117501
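###Markdown
If you want to keep a trained agent for later, here is a minimal sketch (the filename `AAPL_a2c_100k` is our own choice, mirroring the commented-out save calls above):
###Code
# Persist the trained A2C agent to disk, then reload it.
# stable-baselines serializes the model parameters to the given path.
model_a2c.save('AAPL_a2c_100k')
model_a2c = A2C.load('AAPL_a2c_100k', env=env_train)
###Output
_____no_output_____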
###Markdown
Model 4: TD3
###Code
#tensorboard --logdir ./single_stock_tensorboard/
# DQN < DDPG < TD3: each successive algorithm improves on the previous one
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_td3 = TD3('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_td3.learn(total_timesteps=100000,tb_log_name="run_aapl_td3")
#model.save('AAPL_td3_50k')
###Output
previous_total_asset:100000
end_total_asset:778869.7048272111
total_reward:678869.7048272111
total_cost: 124.57847519610326
total trades: 2504
Sharpe: 0.9529387392421051
previous_total_asset:100000
end_total_asset:1080615.564949955
total_reward:980615.5649499551
total_cost: 99.89794448471066
total trades: 2515
Sharpe: 1.0705613146396975
###Markdown
Testing data
###Code
test = data_clean[data_clean.datadate >= '2019-01-01']
# the index needs to start from 0
test = test.reset_index(drop=True)
test.head()
###Output
_____no_output_____
###Markdown
TradingAssume that we have $100,000 initial capital at 2019-01-01. We use the TD3 model to trade AAPL.
###Code
model = model_td3
env_test = DummyVecEnv([lambda: SingleStockEnv(test)])
obs_test = env_test.reset()
print("==============Model Prediction===========")
for i in range(len(test.index.unique())):
action, _states = model.predict(obs_test)
obs_test, rewards, dones, info = env_test.step(action)
env_test.render()
###Output
==============Model Prediction===========
previous_total_asset:100000
end_total_asset:310696.8490824127
total_reward:210696.84908241272
total_cost: 99.87627464294434
total trades: 456
Sharpe: 1.864143095949389
###Markdown
Part 5: Backtest Our StrategyFor simplicity, in the article we just calculate the Sharpe ratio and the annual return manually.
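Concretely, with daily returns $r_t$, the code below computes the annualized Sharpe ratio as $\sqrt{252}\,\bar{r}/\sigma_r$ and the annual return as $\left((1+\bar{r})^{252}-1\right)\times 100\%$, where $\bar{r}$ and $\sigma_r$ are the mean and standard deviation of the daily returns and 252 is the number of trading days in a year (no risk-free rate is subtracted).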
###Code
def get_DRL_sharpe():
df_total_value=pd.read_csv('account_value.csv',index_col=0)
df_total_value.columns = ['account_value']
df_total_value['daily_return']=df_total_value.pct_change(1)
sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
df_total_value['daily_return'].std()
annual_return = ((df_total_value['daily_return'].mean()+1)**252-1)*100
print("annual return: ", annual_return)
print("sharpe ratio: ", sharpe)
return df_total_value
def get_buy_and_hold_sharpe(test):
test['daily_return']=test['adjcp'].pct_change(1)
sharpe = (252**0.5)*test['daily_return'].mean()/ \
test['daily_return'].std()
annual_return = ((test['daily_return'].mean()+1)**252-1)*100
print("annual return: ", annual_return)
print("sharpe ratio: ", sharpe)
#return sharpe
df_total_value=get_DRL_sharpe()
get_buy_and_hold_sharpe(test)
DRL_cumulative_return = (df_total_value.account_value.pct_change(1)+1).cumprod()-1
buy_and_hold_cumulative_return = (test.adjcp.pct_change(1)+1).cumprod()-1
%matplotlib inline
fig, ax = plt.subplots(figsize=(12, 8))
plt.plot(test.datadate, DRL_cumulative_return, color='red',label = "DRL")
plt.plot(test.datadate, buy_and_hold_cumulative_return, label = "Buy & Hold")
plt.title("Cumulative Return for AAPL with Transaction Cost",size= 18)
plt.legend()
plt.rc('legend',fontsize=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
###Output
_____no_output_____ |
Hands-on lab/notebooks/Register Model.ipynb | ###Markdown
Create and connect to an Azure Machine Learning WorkspaceRun the following cell to create a new Azure Machine Learning **Workspace**.**Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.
###Code
from azureml.core import Workspace, Experiment, Run
ws = Workspace.from_config()
print('Workspace configuration succeeded')
###Output
_____no_output_____
###Markdown
Get the Model Training Run**Load the run_info.json file that has the run id for the model training run**
###Code
import os
import json
output_path = './outputs'
run_info_filepath = os.path.join(output_path, 'run_info.json')
try:
with open(run_info_filepath) as f:
run_info = json.load(f)
print('run_info.json loaded')
print(run_info)
except:
print("Cannot open: ", run_info_filepath)
print("Please fix output_path before proceeding!")
###Output
_____no_output_____
###Markdown
**Get the Run object from the run id**
###Code
experiment_name = 'deep-learning'
# Get the run object from the run id using the Run and Experiment objects
run = Run(Experiment(ws, experiment_name), run_info['id'])
###Output
_____no_output_____
###Markdown
Register Model **Register the Model with Azure Model Registry**
###Code
model_name = 'compliance-classifier'
model_description = 'Deep learning model to classify the descriptions of car components as compliant or non-compliant.'
# Register the ONNX model and tag it with the prediction type and run id
model_run = run.register_model(model_name=model_name,
                               model_path="outputs/model/model.onnx",
                               description=model_description,
                               tags={"type": "classification", "run_id": run.id})
print("Model Registered: {} \nModel Description: {} \nModel Version: {}".format(model_run.name,
model_run.description, model_run.version))
###Output
_____no_output_____
###Markdown
Create and connect to an Azure Machine Learning WorkspaceRun the following cell to create a new Azure Machine Learning **Workspace**.**Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.
###Code
from azureml.core import Workspace, Experiment, Run
ws = Workspace.from_config()
print('Workspace configuration succeeded')
###Output
_____no_output_____
###Markdown
Get the Model Training Run**Load the run_info.json file that has the run id for the model training run**
###Code
import os
import json
output_path = './outputs'
run_info_filepath = os.path.join(output_path, 'run_info.json')
try:
with open(run_info_filepath) as f:
run_info = json.load(f)
print('run_info.json loaded')
print(run_info)
except:
print("Cannot open: ", run_info_filepath)
print("Please fix output_path before proceeding!")
###Output
_____no_output_____
###Markdown
**Get the Run object from the run id**
###Code
experiment_name = 'deep-learning'
run = Run(Experiment(ws, experiment_name), run_info['id'])
###Output
_____no_output_____
###Markdown
Register Model **Register the Model with Azure Model Registry**
###Code
model_name = 'compliance-classifier'
model_description = 'Deep learning model to classify the descriptions of car components as compliant or non-compliant.'
model_run = run.register_model(model_name=model_name,
model_path="outputs/model/model.h5",
description=model_description,
tags={"type": "classification", "run_id": run.id})
print("Model Registered: {} \nModel Description: {} \nModel Version: {}".format(model_run.name,
model_run.description, model_run.version))
###Output
_____no_output_____
###Markdown
Create and connect to an Azure Machine Learning WorkspaceRun the following cell to create a new Azure Machine Learning **Workspace**.**Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.
###Code
from azureml.core import Workspace, Experiment, Run
ws = Workspace.from_config()
print('Workspace configuration succeeded')
###Output
Workspace configuration succeeded
###Markdown
Get the Model Training Run**Load the run_info.json file that has the run id for the model training run**
###Code
import os
import json
output_path = './outputs'
run_info_filepath = os.path.join(output_path, 'run_info.json')
try:
with open(run_info_filepath) as f:
run_info = json.load(f)
print('run_info.json loaded')
print(run_info)
except:
print("Cannot open: ", run_info_filepath)
print("Please fix output_path before proceeding!")
###Output
run_info.json loaded
{'id': 'deep-learning_1566913024_c01d0dc8'}
###Markdown
**Get the Run object from the run id**
###Code
experiment_name = 'deep-learning'
run = Run(Experiment(ws, experiment_name), run_info['id'])
###Output
_____no_output_____
###Markdown
Register Model **Register the Model with Azure Model Registry**
###Code
model_name = 'compliance-classifier'
model_description = 'Deep learning model to classify the descriptions of car components as compliant or non-compliant.'
model_run = run.register_model(model_name=model_name,
model_path="outputs/model/model.h5",
description=model_description,
tags={"type": "classification", "run_id": run.id})
print("Model Registered: {} \nModel Description: {} \nModel Version: {}".format(model_run.name,
model_run.description, model_run.version))
###Output
Model Registered: compliance-classifier
Model Description: Deep learning model to classify the descriptions of car components as compliant or non-compliant.
Model Version: 30
###Markdown
Create and connect to an Azure Machine Learning WorkspaceRun the following cell to create a new Azure Machine Learning **Workspace**.**Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.
###Code
from azureml.core import Workspace, Experiment, Run
ws = Workspace.from_config()
print('Workspace configuration succeeded')
###Output
Workspace configuration succeeded
###Markdown
Get the Model Training Run**Load the run_info.json file that has the run id for the model training run**
###Code
import os
import json
output_path = './outputs'
run_info_filepath = os.path.join(output_path, 'run_info.json')
try:
with open(run_info_filepath) as f:
run_info = json.load(f)
print('run_info.json loaded')
print(run_info)
except:
print("Cannot open: ", run_info_filepath)
print("Please fix output_path before proceeding!")
###Output
run_info.json loaded
{'id': 'deep-learning_1562005774_5e787bcf'}
###Markdown
**Get the Run object from the run id**
###Code
experiment_name = 'deep-learning'
run = Run(Experiment(ws, experiment_name), run_info['id'])
###Output
_____no_output_____
###Markdown
Register Model **Register the Model with Azure Model Registry**
###Code
model_name = 'compliance-classifier'
model_description = 'Deep learning model to classify the descriptions of car components as compliant or non-compliant.'
model_run = run.register_model(model_name=model_name,
model_path="outputs/model/model.h5",
tags={"type": "classification", "description": model_description, "run_id": run.id})
print("Model Registered: {} \nModel Description: {} \nModel Version: {}".format(model_run.name,
model_run.tags["description"], model_run.version))
###Output
Model Registered: compliance-classifier
Model Description: Deep learning model to classify the descriptions of car components as compliant or non-compliant.
Model Version: 20
###Markdown
Create and connect to an Azure Machine Learning WorkspaceRun the following cell to create a new Azure Machine Learning **Workspace**.**Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`.
###Code
from azureml.core import Workspace, Experiment, Run
ws = Workspace.from_config()
print('Workspace configuration succeeded')
###Output
_____no_output_____
###Markdown
Get the Model Training Run**Load the run_info.json file that has the run id for the model training run**
###Code
import os
import json
output_path = './outputs'
run_info_filepath = os.path.join(output_path, 'run_info.json')
try:
with open(run_info_filepath) as f:
run_info = json.load(f)
print('run_info.json loaded')
print(run_info)
except:
print("Cannot open: ", run_info_filepath)
print("Please fix output_path before proceeding!")
###Output
_____no_output_____
###Markdown
**Get the Run object from the run id**
###Code
experiment_name = 'deep-learning'
run = Run(Experiment(ws, experiment_name), run_info['id'])
###Output
_____no_output_____
###Markdown
Register Model **Register the Model with Azure Model Registry**
###Code
model_name = 'compliance-classifier'
model_description = 'Deep learning model to classify the descriptions of car components as compliant or non-compliant.'
model_run = run.register_model(model_name=model_name,
model_path="outputs/model/model.onnx",
description=model_description,
tags={"type": "classification", "run_id": run.id})
print("Model Registered: {} \nModel Description: {} \nModel Version: {}".format(model_run.name,
model_run.description, model_run.version))
###Output
_____no_output_____
deep learning/TensorFlow_examples/feature_sets.ipynb | ###Markdown
Copyright 2017 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Feature Sets **Learning Objective:** Create a minimal set of features that performs just as well as a more complex feature set So far, we've thrown all of our features into the model. Models with fewer features use fewer resources and are easier to maintain. Let's see if we can build a model on a minimal set of housing features that will perform equally as well as one that uses all the features in the data set. SetupAs before, let's load and prepare the California housing data.
###Code
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://dl.google.com/mlcc/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
"""Prepares input features from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the features to be used for the model, including
synthetic features.
"""
selected_features = california_housing_dataframe[
["latitude",
"longitude",
"housing_median_age",
"total_rooms",
"total_bedrooms",
"population",
"households",
"median_income"]]
processed_features = selected_features.copy()
# Create a synthetic feature.
processed_features["rooms_per_person"] = (
california_housing_dataframe["total_rooms"] /
california_housing_dataframe["population"])
return processed_features
def preprocess_targets(california_housing_dataframe):
"""Prepares target features (i.e., labels) from California housing data set.
Args:
california_housing_dataframe: A Pandas DataFrame expected to contain data
from the California housing data set.
Returns:
A DataFrame that contains the target feature.
"""
output_targets = pd.DataFrame()
# Scale the target to be in units of thousands of dollars.
output_targets["median_house_value"] = (
california_housing_dataframe["median_house_value"] / 1000.0)
return output_targets
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
###Output
_____no_output_____
###Markdown
Task 1: Develop a Good Feature Set**What's the best performance you can get with just 2 or 3 features?**A **correlation matrix** shows pairwise correlations, both for each feature compared to the target and for each feature compared to other features.Here, correlation is defined as the [Pearson correlation coefficient](https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient). You don't have to understand the mathematical details for this exercise.Correlation values have the following meanings: * `-1.0`: perfect negative correlation * `0.0`: no correlation * `1.0`: perfect positive correlation
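For two series $x$ and $y$ it is defined as $r = \frac{\sum_i (x_i-\bar{x})(y_i-\bar{y})}{\sqrt{\sum_i (x_i-\bar{x})^2}\,\sqrt{\sum_i (y_i-\bar{y})^2}}$, which is what `DataFrame.corr()` computes by default.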
###Code
correlation_dataframe = training_examples.copy()
correlation_dataframe["target"] = training_targets["median_house_value"]
correlation_dataframe.corr()
###Output
_____no_output_____
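###Markdown
One small sketch of how you might use this matrix (the helper below is our own, not part of the exercise): rank the features by the absolute value of their correlation with the target.
###Code
# Rank features by |correlation| with the target; strongly
# correlated features are promising candidates for the model.
corr_with_target = correlation_dataframe.corr()["target"].drop("target")
corr_with_target.abs().sort_values(ascending=False)
###Output
_____no_output_____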
###Markdown
Features that have strong positive or negative correlations with the target will add information to our model. We can use the correlation matrix to find such strongly correlated features.We'd also like to have features that aren't so strongly correlated with each other, so that they add independent information.Use this information to try removing features. You can also try developing additional synthetic features, such as ratios of two raw features.For convenience, we've included the training code from the previous exercise.
###Code
def construct_feature_columns(input_features):
"""Construct the TensorFlow Feature Columns.
Args:
input_features: The names of the numerical input features to use.
Returns:
A set of feature columns
"""
return set([tf.feature_column.numeric_column(my_feature)
for my_feature in input_features])
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
"""Trains a linear regression model.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
features = {key:np.array(value) for key,value in dict(features).items()}
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
def train_model(
learning_rate,
steps,
batch_size,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear regression model.
In addition to training, this function also prints training progress information,
as well as a plot of the training and validation loss over time.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
batch_size: A non-zero `int`, the batch size.
training_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for training.
training_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for training.
validation_examples: A `DataFrame` containing one or more columns from
`california_housing_dataframe` to use as input features for validation.
validation_targets: A `DataFrame` containing exactly one column from
`california_housing_dataframe` to use as target for validation.
Returns:
A `LinearRegressor` object trained on the training data.
"""
periods = 10
steps_per_period = steps / periods
# Create a linear regressor object.
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=construct_feature_columns(training_examples),
optimizer=my_optimizer
)
# Create input functions.
training_input_fn = lambda: my_input_fn(training_examples,
training_targets["median_house_value"],
batch_size=batch_size)
predict_training_input_fn = lambda: my_input_fn(training_examples,
training_targets["median_house_value"],
num_epochs=1,
shuffle=False)
predict_validation_input_fn = lambda: my_input_fn(validation_examples,
validation_targets["median_house_value"],
num_epochs=1,
shuffle=False)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("RMSE (on training data):")
training_rmse = []
validation_rmse = []
for period in range (0, periods):
# Train the model, starting from the prior state.
linear_regressor.train(
input_fn=training_input_fn,
steps=steps_per_period,
)
# Take a break and compute predictions.
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
# Compute training and validation loss.
training_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(training_predictions, training_targets))
validation_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(validation_predictions, validation_targets))
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
# Add the loss metrics from this period to our list.
training_rmse.append(training_root_mean_squared_error)
validation_rmse.append(validation_root_mean_squared_error)
print("Model training finished.")
# Output a graph of loss metrics over periods.
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.tight_layout()
plt.plot(training_rmse, label="training")
plt.plot(validation_rmse, label="validation")
plt.legend()
return linear_regressor
###Output
_____no_output_____
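###Markdown
As a concrete illustration of the ratio idea above, here is a minimal sketch (the feature name `bedrooms_per_room` is our own example, not part of the exercise):
###Code
# A synthetic ratio feature: bedrooms as a fraction of all rooms.
# Ratios like this can carry information the raw counts don't.
ratio_examples = training_examples.copy()
ratio_examples["bedrooms_per_room"] = (
    training_examples["total_bedrooms"] / training_examples["total_rooms"])
ratio_examples["bedrooms_per_room"].describe()
###Output
_____no_output_____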
###Markdown
Spend 5 minutes searching for a good set of features and training parameters. Then check the solution to see what we chose. Don't forget that different features may require different learning parameters.
###Code
#
# Your code here: add your features of choice as a list of quoted strings.
#
minimal_features = [
]
assert minimal_features, "You must select at least one feature!"
minimal_training_examples = training_examples[minimal_features]
minimal_validation_examples = validation_examples[minimal_features]
#
# Don't forget to adjust these parameters.
#
train_model(
learning_rate=0.001,
steps=500,
batch_size=5,
training_examples=minimal_training_examples,
training_targets=training_targets,
validation_examples=minimal_validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
SolutionClick below for a solution.
###Code
minimal_features = [
"median_income",
"latitude",
]
minimal_training_examples = training_examples[minimal_features]
minimal_validation_examples = validation_examples[minimal_features]
_ = train_model(
learning_rate=0.01,
steps=500,
batch_size=5,
training_examples=minimal_training_examples,
training_targets=training_targets,
validation_examples=minimal_validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____
###Markdown
Task 2: Make Better Use of LatitudePlotting `latitude` vs. `median_house_value` shows that there really isn't a linear relationship there.Instead, there are a couple of peaks, which roughly correspond to Los Angeles and San Francisco.
###Code
plt.scatter(training_examples["latitude"], training_targets["median_house_value"])
###Output
_____no_output_____
###Markdown
**Try creating some synthetic features that do a better job with latitude.**For example, you could have a feature that maps `latitude` to a value of `|latitude - 38|`, and call this `distance_from_san_francisco`.Or you could break the space into 10 different buckets. `latitude_32_to_33`, `latitude_33_to_34`, etc., each showing a value of `1.0` if `latitude` is within that bucket range and a value of `0.0` otherwise.Use the correlation matrix to help guide development, and then add them to your model if you find something that looks good.What's the best validation performance you can get?
###Code
#
# YOUR CODE HERE: Train on a new data set that includes synthetic features based on latitude.
#
###Output
_____no_output_____
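###Markdown
Before peeking at the solution, here is a minimal sketch of the first idea above (38 is a rough stand-in for San Francisco's latitude, as the prompt suggests):
###Code
# Synthetic feature: absolute distance, in degrees of latitude,
# from roughly San Francisco's latitude.
latitude_examples = training_examples.copy()
latitude_examples["distance_from_san_francisco"] = (
    training_examples["latitude"] - 38).abs()
latitude_examples["distance_from_san_francisco"].head()
###Output
_____no_output_____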
###Markdown
SolutionClick below for a solution. Aside from `latitude`, we'll also keep `median_income`, to compare with the previous results.We decided to bucketize the latitude. This is fairly straightforward in Pandas using `Series.apply`.
###Code
# Use list() so the ranges can be iterated more than once
# (zip returns a single-use iterator in Python 3).
LATITUDE_RANGES = list(zip(range(32, 44), range(33, 45)))
def select_and_transform_features(source_df):
selected_examples = pd.DataFrame()
selected_examples["median_income"] = source_df["median_income"]
for r in LATITUDE_RANGES:
selected_examples["latitude_%d_to_%d" % r] = source_df["latitude"].apply(
lambda l: 1.0 if l >= r[0] and l < r[1] else 0.0)
return selected_examples
selected_training_examples = select_and_transform_features(training_examples)
selected_validation_examples = select_and_transform_features(validation_examples)
_ = train_model(
learning_rate=0.01,
steps=500,
batch_size=5,
training_examples=selected_training_examples,
training_targets=training_targets,
validation_examples=selected_validation_examples,
validation_targets=validation_targets)
###Output
_____no_output_____ |
notebooks/chap06.ipynb | ###Markdown
Modeling and Simulation in PythonChapter 6Copyright 2017 Allen DowneyLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
p_0 = get_first_value(census)
p_end = get_last_value(census)
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
###Output
_____no_output_____
###Markdown
System objects We can rewrite the code from the previous chapter using system objects.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
annual_growth=annual_growth)
###Output
_____no_output_____
###Markdown
And we can encapsulate the code that runs the model in a function.
###Code
def run_simulation1(system):
"""Runs the constant growth model.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = results[t] + system.annual_growth
return results
###Output
_____no_output_____
###Markdown
We can also encapsulate the code that plots the results.
###Code
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
###Output
_____no_output_____
###Markdown
Here's how we run it.
###Code
results = run_simulation1(system)
plot_results(census, un, results, 'Constant growth model')
###Output
_____no_output_____
###Markdown
Proportional growth Here's a more realistic model where the number of births and deaths is proportional to the current population.
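In symbols, the update below is $p_{t+1} = p_t + b\,p_t - d\,p_t$, where $b$ is the annual birth rate and $d$ is the annual death rate.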
###Code
def run_simulation2(system):
"""Run a model with proportional birth and death.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
births = system.birth_rate * results[t]
deaths = system.death_rate * results[t]
results[t+1] = results[t] + births - deaths
return results
###Output
_____no_output_____
###Markdown
I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data.
###Code
system.death_rate = 0.01
system.birth_rate = 0.027
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
results = run_simulation2(system)
plot_results(census, un, results, 'Proportional model')
savefig('figs/chap06-fig01.pdf')
###Output
Saving figure to file figs/chap06-fig01.pdf
###Markdown
The model fits the data pretty well for the first 20 years, but not so well after that. Factoring out the update function `run_simulation1` and `run_simulation2` are nearly identical except the body of the loop. So we can factor that part out into a function.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
###Output
_____no_output_____
###Markdown
The name `update_func` refers to a function object.
###Code
update_func1
###Output
_____no_output_____
###Markdown
Which we can confirm by checking its type.
###Code
type(update_func1)
###Output
_____no_output_____
###Markdown
`run_simulation` takes the update function as a parameter and calls it just like any other function.
###Code
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = update_func(results[t], t, system)
return results
###Output
_____no_output_____
###Markdown
Here's how we use it.
###Code
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = census[t_0]
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
birth_rate=0.027,
death_rate=0.01)
results = run_simulation(system, update_func1)
plot_results(census, un, results, 'Proportional model, factored')
###Output
_____no_output_____
###Markdown
Remember not to put parentheses after `update_func1`. What happens if you try? You get a `TypeError` saying that `update_func1()` is missing 3 required positional arguments: 'pop', 't', and 'system'.
###Code
#results2 = run_simulation(system,update_func1())
###Output
_____no_output_____
###Markdown
**Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
print('t: ' + str(t) + ' pop: ' + str(pop))
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
run_simulation(system, update_func1)
###Output
t: 1950 pop: 2.557628654
t: 1951 pop: 2.601108341118
t: 1952 pop: 2.645327182917006
t: 1953 pop: 2.6902977450265952
t: 1954 pop: 2.7360328066920476
t: 1955 pop: 2.7825453644058125
t: 1956 pop: 2.829848635600711
t: 1957 pop: 2.8779560624059233
t: 1958 pop: 2.926881315466824
t: 1959 pop: 2.9766382978297603
t: 1960 pop: 3.0272411488928666
t: 1961 pop: 3.078704248424045
t: 1962 pop: 3.131042220647254
t: 1963 pop: 3.184269938398258
t: 1964 pop: 3.2384025273510284
t: 1965 pop: 3.293455370315996
t: 1966 pop: 3.349444111611368
t: 1967 pop: 3.406384661508761
t: 1968 pop: 3.46429320075441
t: 1969 pop: 3.523186185167235
t: 1970 pop: 3.583080350315078
t: 1971 pop: 3.6439927162704344
t: 1972 pop: 3.7059405924470314
t: 1973 pop: 3.768941582518631
t: 1974 pop: 3.833013589421448
t: 1975 pop: 3.8981748204416125
t: 1976 pop: 3.9644437923891203
t: 1977 pop: 4.031839336859735
t: 1978 pop: 4.100380605586351
t: 1979 pop: 4.170087075881319
t: 1980 pop: 4.240978556171301
t: 1981 pop: 4.313075191626214
t: 1982 pop: 4.3863974698838595
t: 1983 pop: 4.460966226871885
t: 1984 pop: 4.536802652728707
t: 1985 pop: 4.613928297825096
t: 1986 pop: 4.692365078888122
t: 1987 pop: 4.77213528522922
t: 1988 pop: 4.8532615850781164
t: 1989 pop: 4.935767032024445
t: 1990 pop: 5.0196750715688605
t: 1991 pop: 5.1050095477855315
t: 1992 pop: 5.191794710097886
t: 1993 pop: 5.28005522016955
t: 1994 pop: 5.369816158912433
t: 1995 pop: 5.461103033613944
t: 1996 pop: 5.553941785185382
t: 1997 pop: 5.648358795533533
t: 1998 pop: 5.744380895057604
t: 1999 pop: 5.8420353702735826
t: 2000 pop: 5.941349971568234
t: 2001 pop: 6.042352921084894
t: 2002 pop: 6.145072920743337
t: 2003 pop: 6.249539160395973
t: 2004 pop: 6.355781326122704
t: 2005 pop: 6.46382960866679
t: 2006 pop: 6.573714712014126
t: 2007 pop: 6.685467862118366
t: 2008 pop: 6.799120815774378
t: 2009 pop: 6.9147058696425425
t: 2010 pop: 7.032255869426465
t: 2011 pop: 7.151804219206714
t: 2012 pop: 7.273384890933229
t: 2013 pop: 7.397032434079093
t: 2014 pop: 7.522781985458438
t: 2015 pop: 7.650669279211232
###Markdown
Combining birth and death Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate.
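With $\alpha = b - d$, the update simplifies to $p_{t+1} = p_t + \alpha\,p_t = (1+\alpha)\,p_t$.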
###Code
def update_func2(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
net_growth = system.alpha * pop
return pop + net_growth
###Output
_____no_output_____
###Markdown
Here's how it works:
###Code
system.alpha = system.birth_rate - system.death_rate
results = run_simulation(system, update_func2)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Exercises**Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice).Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement.Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can.
###Code
system.death_rate_pre_1980 = 0.01
system.birth_rate_pre_1980 = 0.0285
system.death_rate_post_1980 = 0.0075
system.birth_rate_post_1980 = 0.023
system.alpha1 = system.birth_rate_pre_1980 - system.death_rate_pre_1980
system.alpha2 = system.birth_rate_post_1980 - system.death_rate_post_1980
def update_func3(pop,t,system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
if (t < 1980):
net_growth = system.alpha1 * pop
else:
net_growth = system.alpha2 * pop
return pop + net_growth
print(update_func3(3000,1950,system))
print(update_func3(3000,1980,system))
print(update_func3(3000,1981,system))
results = run_simulation(system, update_func3)
plot_results(census, un, results, 'Model with separate growth rates before and after 1980')
###Output
_____no_output_____
###Markdown
Modeling and Simulation in PythonChapter 6Copyright 2017 Allen DowneyLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) Assignment 6 Completed by: Philip Tanofsky
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
p_0 = get_first_value(census)
p_end = get_last_value(census)
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
###Output
_____no_output_____
###Markdown
System objects We can rewrite the code from the previous chapter using system objects.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
annual_growth=annual_growth)
###Output
_____no_output_____
###Markdown
And we can encapsulate the code that runs the model in a function.
###Code
def run_simulation1(system):
"""Runs the constant growth model.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = results[t] + system.annual_growth
return results
###Output
_____no_output_____
###Markdown
We can also encapsulate the code that plots the results.
###Code
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
###Output
_____no_output_____
###Markdown
Here's how we run it.
###Code
results = run_simulation1(system)
plot_results(census, un, results, 'Constant growth model')
###Output
_____no_output_____
###Markdown
Proportional growth Here's a more realistic model where the number of births and deaths is proportional to the current population.
###Code
def run_simulation2(system):
"""Run a model with proportional birth and death.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
births = system.birth_rate * results[t]
deaths = system.death_rate * results[t]
results[t+1] = results[t] + births - deaths
return results
###Output
_____no_output_____
###Markdown
I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data.
###Code
system.death_rate = 0.01
system.birth_rate = 0.027
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
results = run_simulation2(system)
plot_results(census, un, results, 'Proportional model')
savefig('figs/chap06-fig01.pdf')
###Output
Saving figure to file figs/chap06-fig01.pdf
###Markdown
The model fits the data pretty well for the first 20 years, but not so well after that. Factoring out the update function `run_simulation1` and `run_simulation2` are nearly identical except the body of the loop. So we can factor that part out into a function.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
print(t, pop)
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
###Output
_____no_output_____
###Markdown
The name `update_func` refers to a function object.
###Code
update_func1
###Output
_____no_output_____
###Markdown
Which we can confirm by checking its type.
###Code
type(update_func1)
###Output
_____no_output_____
###Markdown
`run_simulation` takes the update function as a parameter and calls it just like any other function.
###Code
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = update_func(results[t], t, system)
return results
###Output
_____no_output_____
###Markdown
Here's how we use it.
###Code
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = census[t_0]
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
birth_rate=0.027,
death_rate=0.01)
results = run_simulation(system, update_func1)
plot_results(census, un, results, 'Proportional model, factored')
###Output
1950 2.557628654
1951 2.601108341118
1952 2.645327182917006
1953 2.6902977450265952
1954 2.7360328066920476
1955 2.7825453644058125
1956 2.829848635600711
1957 2.8779560624059233
1958 2.926881315466824
1959 2.9766382978297603
1960 3.0272411488928666
1961 3.078704248424045
1962 3.131042220647254
1963 3.184269938398258
1964 3.2384025273510284
1965 3.293455370315996
1966 3.349444111611368
1967 3.406384661508761
1968 3.46429320075441
1969 3.523186185167235
1970 3.583080350315078
1971 3.6439927162704344
1972 3.7059405924470314
1973 3.768941582518631
1974 3.833013589421448
1975 3.8981748204416125
1976 3.9644437923891203
1977 4.031839336859735
1978 4.100380605586351
1979 4.170087075881319
1980 4.240978556171301
1981 4.313075191626214
1982 4.3863974698838595
1983 4.460966226871885
1984 4.536802652728707
1985 4.613928297825096
1986 4.692365078888122
1987 4.77213528522922
1988 4.8532615850781164
1989 4.935767032024445
1990 5.0196750715688605
1991 5.1050095477855315
1992 5.191794710097886
1993 5.28005522016955
1994 5.369816158912433
1995 5.461103033613944
1996 5.553941785185382
1997 5.648358795533533
1998 5.744380895057604
1999 5.8420353702735826
2000 5.941349971568234
2001 6.042352921084894
2002 6.145072920743337
2003 6.249539160395973
2004 6.355781326122704
2005 6.46382960866679
2006 6.573714712014126
2007 6.685467862118366
2008 6.799120815774378
2009 6.9147058696425425
2010 7.032255869426465
2011 7.151804219206714
2012 7.273384890933229
2013 7.397032434079093
2014 7.522781985458438
2015 7.650669279211232
###Markdown
Remember not to put parentheses after `update_func1`. What happens if you try? **Answer:** Calling `update_func1()` raises `TypeError: update_func1() missing 3 required positional arguments: 'pop', 't', and 'system'`. **Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again. **Answer:** Completed; the years and their corresponding populations are printed above. Combining birth and death Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate.
###Code
def update_func2(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
net_growth = system.alpha * pop
return pop + net_growth
###Output
_____no_output_____
###Markdown
Here's how it works:
###Code
system.alpha = system.birth_rate - system.death_rate
results = run_simulation(system, update_func2)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Exercises **Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice). Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement. Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can.
###Code
def update_func3(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
if (t < 1980):
net_growth = system.alpha1 * pop
else:
net_growth = system.alpha2 * pop
return pop + net_growth
# Default system to values from above, with alpha1 and alpha2 added
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
alpha1 = 0.019,
alpha2 = 0.014)
population = update_func3(system.p_0, system.t_0, system)
(system.p_0 * system.alpha1) + system.p_0
###Output
_____no_output_____
###Markdown
For the year 1950, the population computed with `alpha1` matches the manual calculation. Next, call `update_func3` with the same population, but with a year (1990) greater than 1980.
###Code
population = update_func3(system.p_0, 1990, system)
(system.p_0 * system.alpha2) + system.p_0
###Output
_____no_output_____
###Markdown
For the year 1990, the population computed with `alpha2` matches the manual calculation.
###Code
# Final part of exercise
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
alpha1 = 0.019,
alpha2 = 0.014)
results = run_simulation(system, update_func3)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Modeling and Simulation in Python Chapter 6 Copyright 2017 Allen Downey License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
p_0 = get_first_value(census)
p_end = get_last_value(census)
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
###Output
_____no_output_____
###Markdown
System objects We can rewrite the code from the previous chapter using system objects.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
annual_growth=annual_growth)
###Output
_____no_output_____
###Markdown
And we can encapsulate the code that runs the model in a function.
###Code
def run_simulation1(system):
"""Runs the constant growth model.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = results[t] + system.annual_growth
return results
###Output
_____no_output_____
###Markdown
We can also encapsulate the code that plots the results.
###Code
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
###Output
_____no_output_____
###Markdown
Here's how we run it.
###Code
results = run_simulation1(system)
plot_results(census, un, results, 'Constant growth model')
###Output
_____no_output_____
###Markdown
Proportional growth Here's a more realistic model where the number of births and deaths is proportional to the current population.
###Code
def run_simulation2(system):
"""Run a model with proportional birth and death.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
births = system.birth_rate * results[t]
deaths = system.death_rate * results[t]
results[t+1] = results[t] + births - deaths
return results
###Output
_____no_output_____
###Markdown
I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data.
###Code
system.death_rate = 0.01
system.birth_rate = 0.027
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
results = run_simulation2(system)
plot_results(census, un, results, 'Proportional model')
savefig('figs/chap06-fig01.pdf')
###Output
Saving figure to file figs/chap06-fig01.pdf
###Markdown
The model fits the data pretty well for the first 20 years, but not so well after that. Factoring out the update function: `run_simulation1` and `run_simulation2` are nearly identical except for the body of the loop, so we can factor that part out into a function.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
###Output
_____no_output_____
###Markdown
The name `update_func1` refers to a function object.
###Code
update_func1
###Output
_____no_output_____
###Markdown
Which we can confirm by checking its type.
###Code
type(update_func1)
###Output
_____no_output_____
###Markdown
`run_simulation` takes the update function as a parameter and calls it just like any other function.
###Code
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = update_func(results[t], t, system)
return results
###Output
_____no_output_____
###Markdown
Here's how we use it.
###Code
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = census[t_0]
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
birth_rate=0.027,
death_rate=0.01)
results = run_simulation(system, update_func1)
plot_results(census, un, results, 'Proportional model, factored')
###Output
_____no_output_____
###Markdown
Remember not to put parentheses after `update_func1`. What happens if you try? **Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again. Combining birth and death Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate.
###Code
def update_func2(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
net_growth = system.alpha * pop
return pop + net_growth
###Output
_____no_output_____
###Markdown
Here's how it works:
###Code
system.alpha = system.birth_rate - system.death_rate
results = run_simulation(system, update_func2)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Exercises **Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice). Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement. Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can. I'll use a logistic curve centered at about 1980; each value on the curve is a growth rate.
###Code
log_series = []
for t in range(1950, 2016,1):
log_series.append(1/(1+np.exp((t-1980)/21))*.022 +.015)
plt.plot(log_series);
def update_func3(pop, t, system, index):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
index: position of year t in log_series
returns: population next year
"""
births = log_series[index] * pop
deaths = system.death_rate * pop
return pop + births - deaths
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for index, t in enumerate(linrange(system.t_0, system.t_end)):
results[t+1] = update_func(results[t], t, system, index)
return results
results = run_simulation(system, update_func3)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
The fit isn't great, but we can at least see a change in the curve when we reach 1980! Near 1980 the second derivative passes through zero, marking the change in growth rate. See the code below for the second derivative.
###Code
# find first derivatives for the series
dy=np.diff(results.values,1)
dx=np.diff(results.index,1)
yfirst=dy/dx
xfirst=0.5*(results.index[:-1]+results.index[1:])
None
# find second derivatives
dyfirst=np.diff(yfirst,1)
dxfirst=np.diff(xfirst,1)
ysecond=dyfirst/dxfirst
None
plt.plot(results.index[:-2],
ysecond);
plt.xlabel('Year')
plt.ylabel('d^2y/dx^2')
###Output
_____no_output_____
###Markdown
Think Bayes Second Edition Copyright 2020 Allen B. Downey License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
###Code
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py and create directories
import os
if not os.path.exists('utils.py'):
!wget https://github.com/AllenDowney/ThinkBayes2/raw/master/code/soln/utils.py
if not os.path.exists('figs'):
!mkdir figs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from empiricaldist import Pmf, Cdf
from utils import decorate, savefig
###Output
_____no_output_____
###Markdown
Introduction In the previous chapter we computed distributions of sums, differences, products, and quotients. In this chapter, we'll compute distributions of minima and maxima and use them to solve inference problems. Then we'll look at distributions that are mixtures of other distributions, which will turn out to be particularly useful for making predictions. But we'll start with a powerful tool for working with distributions, the cumulative distribution function. Cumulative Distribution Functions So far we have been using probability mass functions to represent distributions. A useful alternative is the **cumulative distribution function**, or CDF. As an example, I'll use the posterior distribution from the Euro problem, which we computed in Chapter 3. Here's the uniform prior we started with.
###Code
hypos = np.linspace(0, 1, 101)
pmf = Pmf(1, hypos)
data = 140, 250
###Output
_____no_output_____
###Markdown
And here's the update.
###Code
from scipy.stats import binom
def update_binomial(pmf, data):
"""Update the PMF using the binomial distribution.
pmf: Pmf representing the prior
data: tuple of integers k and n
"""
k, n = data
xs = pmf.qs
likelihood = binom.pmf(k, n, xs)
pmf *= likelihood
pmf.normalize()
update_binomial(pmf, data)
###Output
_____no_output_____
###Markdown
The CDF is the cumulative sum of the PMF, so we can compute it like this:
###Code
cumulative = pmf.cumsum()
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
def decorate_euro(title):
decorate(xlabel='Proportion of heads (x)',
ylabel='Probability',
title=title)
cumulative.plot(label='CDF')
pmf.plot(label='PMF')
decorate_euro(title='Posterior distribution for the Euro problem')
savefig('fig06-01')
###Output
_____no_output_____
###Markdown
The range of the CDF is always from 0 to 1, in contrast with the PMF, where the maximum can be any probability. The result is a Pandas Series, so we can use the bracket operator to select an element:
###Code
cumulative[0.61]
###Output
_____no_output_____
###Markdown
The result is about 0.96, which means that the total probability of all quantities less than or equal to 0.61 is 96%. To go the other way --- to look up a probability and get the corresponding quantile --- we can use interpolation:
###Code
from scipy.interpolate import interp1d
ps = cumulative.values
qs = cumulative.index
interp = interp1d(ps, qs)
interp(0.96)
###Output
_____no_output_____
###Markdown
The result is about 0.61, so that confirms that the 96th percentile of this distribution is 0.61. `empiricaldist` provides a class called `Cdf` that represents a cumulative distribution function. Given a `Pmf`, you can compute a `Cdf` like this:
###Code
cdf = pmf.make_cdf()
###Output
_____no_output_____
###Markdown
`make_cdf` uses `np.cumsum` to compute the cumulative sum of the probabilities. You can use brackets to select an element from a `Cdf`:
###Code
cdf[0.61]
###Output
_____no_output_____
###Markdown
But if you look up a quantity that's not in the distribution, you get a `KeyError`.
###Code
try:
cdf[0.615]
except KeyError as e:
print('KeyError')
###Output
_____no_output_____
###Markdown
You can also call a `Cdf` as a function, using parentheses. If the argument does not appear in the `Cdf`, it interpolates between quantities.
###Code
cdf(0.615)
###Output
_____no_output_____
###Markdown
Going the other way, you can use `quantile` to look up a cumulative probability and get the corresponding quantity:
###Code
cdf.quantile(0.9638303)
###Output
_____no_output_____
###Markdown
`Cdf` also provides `credible_interval`, which computes a credible interval that contains the given probability:
###Code
cdf.credible_interval(0.9)
###Output
_____no_output_____
###Markdown
CDFs and PMFs are equivalent in the sense that they contain the same information about the distribution, and you can always convert from one to the other. Given a `Cdf`, you can get the equivalent `Pmf` like this:
###Code
pmf = cdf.make_pmf()
###Output
_____no_output_____
###Markdown
`make_pmf` uses `np.diff` to compute differences between consecutive cumulative probabilities. One reason `Cdf` objects are useful is that they compute quantiles efficiently. Another is that they make it easy to compute the distribution of a maximum or minimum, as we'll see in the next section. Best Three of Four In *Dungeons & Dragons*, each character has six attributes: strength, intelligence, wisdom, dexterity, constitution, and charisma. To generate a new character, players roll four 6-sided dice for each attribute and add up the best three. For example, if I roll for strength and get 1, 2, 3, 4 on the dice, my character's strength would be 9. As an exercise, let's figure out the distribution of these attributes. Then, for each character, we'll figure out the distribution of their best attribute. In the previous notebook, we computed the distribution of the sum of three dice like this:
###Code
def make_die(sides):
"""Pmf that represents a die with the given number of sides.
sides: int
returns: Pmf
"""
outcomes = np.arange(1, sides+1)
die = Pmf(1/sides, outcomes)
return die
def add_dist_seq(seq):
"""Distribution of sum of quantities from PMFs.
seq: sequence of Pmf objects
returns: Pmf
"""
total = seq[0]
for other in seq[1:]:
total = total.add_dist(other)
return total
die = make_die(6)
dice = [die] * 3
pmf_3d6 = add_dist_seq(dice)
###Output
_____no_output_____
###Markdown
Here's what that distribution looks like:
###Code
def decorate_dice(title=''):
decorate(xlabel='Outcome',
ylabel='PMF',
title=title)
pmf_3d6.plot()
decorate_dice('Distribution of attributes')
###Output
_____no_output_____
###Markdown
But if we roll four dice and add up the best three, computing the distribution of the sum is a bit more complicated. I'll estimate the distribution by simulating 10,000 rolls. First I'll create an array of random values from 1 to 6, with 10,000 rows and 4 columns:
###Code
n = 10000
a = np.random.randint(1, 7, size=(n, 4))
###Output
_____no_output_____
###Markdown
To find the best three outcomes in each row, I'll sort along `axis=1`, which means across the columns.
###Code
a.sort(axis=1)
###Output
_____no_output_____
###Markdown
Finally, I'll select the last three columns and add them up.
###Code
t = a[:, 1:].sum(axis=1)
###Output
_____no_output_____
###Markdown
Now `t` is a one-dimensional array of 10,000 values, one for each simulated roll. We can compute the PMF of the values in `t` like this:
###Code
pmf_4d6 = Pmf.from_seq(t)
###Output
_____no_output_____
###Markdown
The following figure shows the distribution of the sum of three dice, `pmf_3d6`, and the distribution of the best three out of four, `pmf_4d6`.
###Code
pmf_3d6.plot(label='sum of 3 dice')
pmf_4d6.plot(label='best 3 of 4')
decorate_dice('Distribution of attributes')
savefig('fig06-02')
###Output
_____no_output_____
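###Markdown
Before moving on, here is a quick numeric comparison (my addition): the `mean` method shows how much the best-3-of-4 rule shifts the outcomes upward.
###Code
pmf_3d6.mean(), pmf_4d6.mean()
###Output
_____no_output_____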
###Markdown
As you might expect, choosing the best three out of four tends to yield higher values. Next we'll find the distribution for the maximum of six attributes, each the sum of the best three of four dice. Maximum To compute the distribution of a maximum or minimum, we can make good use of the cumulative distribution function. First, I'll compute the `Cdf` of the best three of four distribution:
###Code
cdf_4d6 = pmf_4d6.make_cdf()
###Output
_____no_output_____
###Markdown
Recall that `Cdf(x)` is the sum of probabilities for quantities less than or equal to `x`. Equivalently, it is the probability that a random value chosen from the distribution is less than or equal to `x`. Now suppose I draw 6 values from this distribution. The probability that all 6 of them are less than or equal to `x` is `Cdf(x)` raised to the 6th power, which we can compute like this:
###Code
cdf_4d6**6
###Output
_____no_output_____
###Markdown
If all 6 values are less than or equal to `x`, that means that their maximum is less than or equal to `x`. So the result is the CDF of their maximum. We can convert it to a `Cdf` object, like this:
###Code
from empiricaldist import Cdf
cdf_max6 = Cdf(cdf_4d6**6)
###Output
_____no_output_____
###Markdown
And compute the equivalent `Pmf` like this:
###Code
pmf_max6 = cdf_max6.make_pmf()
###Output
_____no_output_____
###Markdown
The following figure shows the result.
###Code
pmf_max6.plot(label='max of 6 attributes')
decorate_dice('Distribution of attributes')
###Output
_____no_output_____
###Markdown
Most characters have at least one attribute greater than 12; almost 10% of them have an 18. The following figure shows the CDFs for the three distributions we have computed.
###Code
cdf_3d6 = pmf_3d6.make_cdf()
cdf_3d6.plot(label='sum of 3 dice')
cdf_4d6 = pmf_4d6.make_cdf()
cdf_4d6.plot(label='best 3 of 4 dice')
cdf_max6.plot(label='max of 6 attributes')
decorate_dice('Distribution of attributes')
plt.ylabel('CDF');
###Output
_____no_output_____
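###Markdown
To put rough numbers on the claims above (my addition): the probability that the maximum exceeds 12 is the complement of the CDF at 12, and the probability of an 18 is the PMF at 18.
###Code
1 - cdf_max6(12), pmf_max6[18]
###Output
_____no_output_____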
###Markdown
`Pmf` and `Cdf` provide `max_dist`, which does the same computation. We can compute the `Pmf` of the maximum like this:
###Code
pmf_max_dist6 = pmf_4d6.max_dist(6)
###Output
_____no_output_____
###Markdown
And the `Cdf` of the maximum like this:
###Code
cdf_max_dist6 = cdf_4d6.max_dist(6)
###Output
_____no_output_____
###Markdown
And we can confirm that the differences are small.
###Code
np.max(np.abs(pmf_max_dist6 - pmf_max6))
np.max(np.abs(cdf_max_dist6 - cdf_max6))
###Output
_____no_output_____
###Markdown
In the next section we'll find the distribution of the minimum. The process is similar, but a little more complicated. See if you can figure it out before you go on. Minimum In the previous section we computed the distribution of a character's best attribute. Now let's compute the distribution of the worst. To compute the distribution of the minimum, we'll use the **complementary CDF**, which we can compute like this:
###Code
prob_gt = 1 - cdf_4d6
prob_gt
###Output
_____no_output_____
###Markdown
As the variable name suggests, the complementary CDF is the probability that a value from the distribution is greater than `x`. If we draw 6 values from the distribution, the probability that all 6 exceed `x` is:
###Code
prob_gt6 = prob_gt**6
prob_gt6
###Output
_____no_output_____
###Markdown
If all 6 exceed `x`, that means their minimum exceeds `x`, so `prob_gt6` is the complementary CDF of the minimum. And that means we can compute the CDF of the minimum like this:
###Code
prob_le6 = 1-prob_gt6
prob_le6
###Output
_____no_output_____
###Markdown
The result is a Pandas Series that represents the CDF of the minimum of six attributes. We can put those values in a `Cdf` object like this:
###Code
cdf_min6 = Cdf(prob_le6)
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
cdf_min6.plot(color='C2', label='minimum of 6')
cdf_max6.plot(color='C3', label='maximum of 6')
decorate_dice('Minimum and maximum of six attributes')
plt.ylabel('CDF')
savefig('fig06-03')
###Output
_____no_output_____
###Markdown
From the `Cdf` we can make the corresponding `Pmf`:
###Code
pmf_min6 = cdf_min6.make_pmf()
###Output
_____no_output_____
###Markdown
`Pmf` and `Cdf` provide `min_dist`, which does the same computation. We can compute the `Pmf` of the minimum like this:
###Code
pmf_min_dist6 = pmf_4d6.min_dist(6)
###Output
_____no_output_____
###Markdown
And the `Cdf` of the minimum like this:
###Code
cdf_min_dist6 = cdf_4d6.min_dist(6)
###Output
_____no_output_____
###Markdown
And we can confirm that the differences are small.
###Code
np.max(np.abs(pmf_min_dist6 - pmf_min6))
np.max(np.abs(cdf_min_dist6 - cdf_min6))
###Output
_____no_output_____
###Markdown
In the exercises at the end of this notebook, you'll use distributions of the minimum and maximum to do Bayesian inference. But first we'll see what happens when we mix distributions. Mixture Let's do one more example inspired by *Dungeons & Dragons*. Suppose I have a 4-sided die and a 6-sided die. I choose one of them at random and roll it. What is the distribution of the outcome? If you know which die it is, the answer is easy. A die with `n` sides yields a uniform distribution from 1 to `n`, including both. We can compute `Pmf` objects to represent the dice, like this:
###Code
d4 = make_die(4)
d6 = make_die(6)
###Output
_____no_output_____
###Markdown
To compute the distribution of the mixture, we can compute the average of the two distributions by adding them and dividing the result by 2:
###Code
total = Pmf.add(d4, d6, fill_value=0) / 2
total
###Output
_____no_output_____
###Markdown
We have to use `Pmf.add` with `fill_value=0` because the two distributions don't have the same set of quantities.If they did, we could use the `+` operator.Here's what the mixture of these distributions looks like.
###Code
mix = Pmf(total)
mix.bar(alpha=0.7)
decorate_dice()
###Output
_____no_output_____
###Markdown
Now suppose I have a 4-sided die and *two* 6-sided dice. Again, I choose one of them at random and roll it. What is the distribution of the outcome? We can solve this problem by computing a weighted average of the distributions, like this:
###Code
total = Pmf.add(d4, 2*d6, fill_value=0) / 3
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
mix = Pmf(total)
mix.normalize()
mix.bar(alpha=0.7)
decorate_dice()
###Output
_____no_output_____
###Markdown
Finally, suppose we have a box with the following mix: one 4-sided die, two 6-sided dice, and three 8-sided dice. If I draw a die from this mix at random, we can use a `Pmf` to represent the hypothetical number of sides on the die:
###Code
hypos = [4,6,8]
counts = [1,2,3]
pmf_dice = Pmf(counts, hypos)
pmf_dice.normalize()
pmf_dice
###Output
_____no_output_____
###Markdown
And I'll make a sequence of `Pmf` objects to represent the dice:
###Code
dice = [make_die(sides) for sides in hypos]
###Output
_____no_output_____
###Markdown
Now we have to multiply each distribution in `dice` by the corresponding probabilities in `pmf_dice`. To express this computation concisely, it is convenient to put the distributions into a Pandas DataFrame:
###Code
pd.DataFrame(dice)
###Output
_____no_output_____
###Markdown
The result is a DataFrame with one row for each distribution and one column for each possible outcome. Not all rows are the same length, so Pandas fills the extra spaces with the special value `NaN`, which stands for "not a number". We can use `fillna` to replace the `NaN` values with 0.
###Code
pd.DataFrame(dice).fillna(0)
###Output
_____no_output_____
###Markdown
Before we multiply by the probabilities in `pmf_dice`, we have to transpose the matrix so the distributions run down the columns rather than across the rows:
###Code
df = pd.DataFrame(dice).fillna(0).transpose()
df
###Output
_____no_output_____
###Markdown
Now we can multiply by the probabilities:
###Code
df *= pmf_dice.ps
df
###Output
_____no_output_____
###Markdown
And add up the weighted distributions:
###Code
df.sum(axis=1)
###Output
_____no_output_____
###Markdown
The argument `axis=1` means we want to sum across the columns, producing one total for each row. The result is a Pandas Series. Putting it all together, here's a function that makes a weighted mixture of distributions.
###Code
def make_mixture(pmf, pmf_seq):
"""Make a mixture of distributions.
pmf: mapping from each hypothesis to its probability
pmf_seq: sequence of Pmfs, each representing
a conditional distribution for one hypothesis
returns: Pmf representing the mixture
"""
df = pd.DataFrame(pmf_seq).fillna(0).transpose()
df *= pmf.ps
total = df.sum(axis=1)
return Pmf(total)
###Output
_____no_output_____
###Markdown
The first parameter is a `Pmf` that maps from each hypothesis to a probability. The second parameter is a sequence of `Pmf` objects, one for each hypothesis. We can call it like this:
###Code
mix = make_mixture(pmf_dice, dice)
###Output
_____no_output_____
###Markdown
And here's what it looks like.
###Code
mix.bar(label='mixture', alpha=0.6)
decorate_dice('Mixture of Uniform Distributions')
savefig('fig06-04')
###Output
_____no_output_____
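###Markdown
As a sanity check (my addition): a mixture of normalized distributions, weighted by a normalized `Pmf`, should itself sum to 1.
###Code
mix.sum()
###Output
_____no_output_____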
###Markdown
Summary We have seen two representations of distributions: `Pmf` and `Cdf` objects. These representations are equivalent in the sense that they contain the same information, so you can convert from one to the other. The primary difference between them is performance: some operations are faster and easier with a Pmf; others are faster with a Cdf. In this chapter we used `Cdf` objects to compute distributions of maxima and minima; these distributions are useful for inference if we are given a maximum or minimum as data. We also computed mixtures of distributions, which we will use in the next chapter to make predictions. Exercises **Exercise:** When you generate a D&D character, instead of rolling dice, you can use the "standard array" of attributes, which is 15, 14, 13, 12, 10, and 8. Do you think you are better off using the standard array or (literally) rolling the dice? Compare the distribution of the values in the standard array to the distribution we computed for the best three out of four: * Which distribution has higher mean? Use the `mean` method. * Which distribution has higher standard deviation? Use the `std` method. * The lowest value in the standard array is 8. For each attribute, what is the probability of getting a value less than 8? If you roll the dice six times, what's the probability that at least one of your attributes is less than 8? * The highest value in the standard array is 15. For each attribute, what is the probability of getting a value greater than 15? If you roll the dice six times, what's the probability that at least one of your attributes is greater than 15? To get you started, here's a `Cdf` that represents the distribution of attributes in the standard array:
###Code
standard = [15,14,13,12,10,8]
cdf_standard = Cdf.from_seq(standard)
###Output
_____no_output_____
###Markdown
We can compare it to the distribution of attributes you get by rolling four dice and adding up the best three.
###Code
cdf_4d6.plot(label='best 3 of 4 dice')
cdf_standard.step(label='standard set')
decorate_dice('Distribution of attributes')
plt.ylabel('CDF');
###Output
_____no_output_____
###Markdown
I plotted `cdf_standard` as a step function to show more clearly that it contains only a few quantities.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** Suppose I have a box with a 6-sided die, an 8-sided die, and a 12-sided die. I choose one of the dice at random, roll it, and report that the outcome is a 1. If I roll the same die again, what is the probability that I get another 1? Hint: Compute the posterior distribution as we have done before and pass it as one of the arguments to `make_mixture`.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** Suppose I have two boxes of dice:* One contains a 4-sided die and a 6-sided die.* The other contains a 6-sided die and an 8-sided die.I choose a box at random, choose a die, and roll it 3 times. If I get 2, 4, and 6, which box do you think I chose?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** Henri Poincaré was a French mathematician who taught at the Sorbonne around 1900. The following anecdote about him is probably fabricated, but it makes an interesting probability problem. Supposedly Poincaré suspected that his local bakery was selling loaves of bread that were lighter than the advertised weight of 1 kg, so every day for a year he bought a loaf of bread, brought it home and weighed it. At the end of the year, he plotted the distribution of his measurements and showed that it fit a normal distribution with mean 950 g and standard deviation 50 g. He brought this evidence to the bread police, who gave the baker a warning. For the next year, Poincaré continued the practice of weighing his bread every day. At the end of the year, he found that the average weight was 1000 g, just as it should be, but again he complained to the bread police, and this time they fined the baker. Why? Because the shape of the distribution was asymmetric. Unlike the normal distribution, it was skewed to the right, which is consistent with the hypothesis that the baker was still making 950 g loaves, but deliberately giving Poincaré the heavier ones. To see whether this anecdote is plausible, let's suppose that when the baker sees Poincaré coming, he hefts `n` loaves of bread and gives Poincaré the heaviest one. How many loaves would the baker have to heft to make the average of the maximum 1000 g? To get you started, I'll generate a year's worth of data from a normal distribution with the given parameters.
###Code
mean = 950
std = 50
sample = np.random.normal(mean, std, size=365)
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** Two doctors fresh out of medical school are arguing about whose hospital delivers more babies. The first doctor says, "I've been at Hospital A for two weeks, and already we've had a day when we delivered 20 babies." The second doctor says, "I've only been at Hospital B for one week, but already there's been a 19-baby day." Which hospital do you think delivers more babies on average? You can assume that the number of babies born in a day is well modeled by a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution) with parameter $\lambda$. For a hypothetical value of $\lambda$, you can compute the PMF of a Poisson distribution like this:
###Code
from scipy.stats import poisson
def make_poisson(lam):
high = np.round(lam * 4)
qs = np.arange(0, int(high))
ps = poisson(lam).pmf(qs)
pmf = Pmf(ps, qs)
pmf.normalize()
return pmf
###Output
_____no_output_____
###Markdown
For example, if the actual value of $\lambda$ is 8, the distribution of babies born in a single day looks like this:
###Code
pmf = make_poisson(8)
pmf.plot()
decorate(xlabel='Number of babies',
ylabel='PMF',
title='Distribution of babies in a single day')
###Output
_____no_output_____
###Markdown
The mean of this distribution is the parameter, $\lambda$.
###Code
pmf.mean()
###Output
_____no_output_____
###Markdown
And here's what the distributions look like for the maximum number of babies after one week or two weeks.
###Code
pmf_max = pmf.max_dist(2 * 7)
pmf_max.plot(label='two weeks')
pmf_max = pmf.max_dist(7)
pmf_max.plot(label='one week')
decorate(xlabel='Number of babies',
ylabel='PMF',
title='Distribution of maximum babies in one day')
###Output
_____no_output_____
###Markdown
Now you finish it off from there.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** This question is related to [a method I developed](https://patents.google.com/patent/US8385227B1/en) for estimating the minimum time for a packet of data to travel through a path in the internet. Suppose I drive the same route three times and the fastest of the three attempts takes 8 minutes. There are two traffic lights on the route. As I approach each light, there is a 40% chance that it is green; in that case, it causes no delay. And there is a 60% chance it is red; in that case it causes a delay that is uniformly distributed from 0 to 60 seconds. What is the posterior distribution of the time it would take to drive the route with no delays? To get you started, here is the distribution of delays if the light is red.
###Code
qs = np.arange(1, 61)
pmf_delay = Pmf(1, qs)
pmf_delay.normalize()
###Output
_____no_output_____
###Markdown
And the distribution of delays if the light is green: always 0.
###Code
pmf_nodelay = Pmf.from_seq([0])
pmf_nodelay
###Output
_____no_output_____
###Markdown
Here is the mixture of delays due to red and green lights.
###Code
pmf = Pmf([0.4, 0.6])
pmf_total = make_mixture(pmf, [pmf_nodelay, pmf_delay])
pmf_total
###Output
_____no_output_____
###Markdown
Now I suggest the following steps: 1. Compute the distribution for the sum of two delays. 2. Compute the distribution for the lowest total delay after three attempts. 3. Make a prior distribution with a range of possible values for the no-delay travel time. 4. For each hypothesis, compute the likelihood of the observed minimum travel time, 8 minutes. 5. Compute the posterior distribution for the no-delay travel time.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
Odds and Addends Think Bayes, Second Edition Copyright 2020 Allen B. Downey License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
###Code
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py and create directories
import os
if not os.path.exists('utils.py'):
!wget https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py
from utils import set_pyplot_params
set_pyplot_params()
###Output
_____no_output_____
###Markdown
This chapter presents a new way to represent a degree of certainty, **odds**, and a new form of Bayes's Theorem, called **Bayes's Rule**. Bayes's Rule is convenient if you want to do a Bayesian update on paper or in your head. It also sheds light on the important idea of **evidence** and how we can quantify the strength of evidence. The second part of the chapter is about "addends", that is, quantities being added, and how we can compute their distributions. We'll define functions that compute the distribution of sums, differences, products, and other operations. Then we'll use those distributions as part of a Bayesian update. Odds One way to represent a probability is with a number between 0 and 1, but that's not the only way. If you have ever bet on a football game or a horse race, you have probably encountered another representation of probability, called **odds**. You might have heard expressions like "the odds are three to one", but you might not know what that means. The **odds in favor** of an event are the ratio of the probability it will occur to the probability that it will not. The following function does this calculation.
###Code
def odds(p):
return p / (1-p)
###Output
_____no_output_____
###Markdown
For example, if my team has a 75% chance of winning, the odds in their favor are three to one, because the chance of winning is three times the chance of losing.
###Code
odds(0.75)
###Output
_____no_output_____
###Markdown
You can write odds in decimal form, but it is also common to write them as a ratio of integers. So "three to one" is sometimes written $3:1$. When probabilities are low, it is more common to report the **odds against** rather than the odds in favor. For example, if my horse has a 10% chance of winning, the odds in favor are $1:9$.
###Code
odds(0.1)
###Output
_____no_output_____
###Markdown
But in that case it would be more common to say that the odds against are $9:1$.
###Code
odds(0.9)
###Output
_____no_output_____
###Markdown
Given the odds in favor, in decimal form, you can convert to probability like this:
###Code
def prob(o):
return o / (o+1)
###Output
_____no_output_____
###Markdown
For example, if the odds are $3/2$, the corresponding probability is $3/5$:
###Code
prob(3/2)
###Output
_____no_output_____
###Markdown
Or if you represent odds with a numerator and denominator, you can convert to probability like this:
###Code
def prob2(yes, no):
return yes / (yes + no)
prob2(3, 2)
###Output
_____no_output_____
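###Markdown
As a round-trip check (my addition): converting a probability to odds and back should recover the original value, up to floating-point error.
###Code
prob(odds(0.3))
###Output
_____no_output_____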
###Markdown
Probabilities and odds are different representations of the same information. Given either one, you can compute the other. Bayes's Rule So far we have worked with Bayes's theorem in the "probability form": $$P(H|D) = \frac{P(H)~P(D|H)}{P(D)}$$Writing $\mathrm{odds}(A)$ for odds in favor of $A$, we can express Bayes's Theorem in "odds form":$$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$This is Bayes's Rule, which says that the posterior odds are the prior odds times the likelihood ratio. Bayes's Rule is convenient for computing a Bayesian update on paper or in your head. For example, let's go back to the cookie problem:> Suppose there are two bowls of cookies. Bowl 1 contains 30 vanilla cookies and 10 chocolate cookies. Bowl 2 contains 20 of each. Now suppose you choose one of the bowls at random and, without looking, select a cookie at random. The cookie is vanilla. What is the probability that it came from Bowl 1? The prior probability is 50%, so the prior odds are 1. The likelihood ratio is $\frac{3}{4} / \frac{1}{2}$, or $3/2$. So the posterior odds are $3/2$, which corresponds to probability $3/5$.
###Code
prior_odds = 1
likelihood_ratio = (3/4) / (1/2)
post_odds = prior_odds * likelihood_ratio
post_odds
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
If we draw another cookie and it's chocolate, we can do another update:
###Code
likelihood_ratio = (1/4) / (1/2)
post_odds *= likelihood_ratio
post_odds
###Output
_____no_output_____
###Markdown
And convert back to probability.
###Code
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
Oliver's blood I'll use Bayes's Rule to solve another problem from MacKay's [*Information Theory, Inference, and Learning Algorithms*](https://www.inference.org.uk/mackay/itila/):> Two people have left traces of their own blood at the scene of a crime. A suspect, Oliver, is tested and found to have type 'O' blood. The blood groups of the two traces are found to be of type 'O' (a common type in the local population, having frequency 60%) and of type 'AB' (a rare type, with frequency 1%). Do these data \[the traces found at the scene\] give evidence in favor of the proposition that Oliver was one of the people \[who left blood at the scene\]? To answer this question, we need to think about what it means for data to give evidence in favor of (or against) a hypothesis. Intuitively, we might say that data favor a hypothesis if the hypothesis is more likely in light of the data than it was before. In the cookie problem, the prior odds are $1$, or probability 50%. The posterior odds are $3/2$, or probability 60%. So the vanilla cookie is evidence in favor of Bowl 1. Bayes's Rule provides a way to make this intuition more precise. Again: $$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$Dividing through by $\mathrm{odds}(A)$, we get: $$\frac{\mathrm{odds}(A|D)}{\mathrm{odds}(A)} = \frac{P(D|A)}{P(D|B)}$$The term on the left is the ratio of the posterior and prior odds. The term on the right is the likelihood ratio, also called the **Bayes factor**. If the Bayes factor is greater than 1, that means that the data were more likely under $A$ than under $B$. And that means that the odds are greater, in light of the data, than they were before. If the Bayes factor is less than 1, that means the data were less likely under $A$ than under $B$, so the odds in favor of $A$ go down. Finally, if the Bayes factor is exactly 1, the data are equally likely under either hypothesis, so the odds do not change. Let's apply that to the problem at hand. If Oliver is one of the people who left blood at the crime scene, he accounts for the 'O' sample; in that case, the probability of the data is the probability that a random member of the population has type 'AB' blood, which is 1%. If Oliver did not leave blood at the scene, we have two samples to account for. If we choose two random people from the population, what is the chance of finding one with type 'O' and one with type 'AB'? Well, there are two ways it might happen: * The first person might have 'O' and the second 'AB', * Or the first person might have 'AB' and the second 'O'. The probability of either combination is $(0.6) (0.01)$, which is 0.6%, so the total probability is twice that, or 1.2%. So the data are a little more likely if Oliver is *not* one of the people who left blood at the scene. We can use these probabilities to compute the likelihood ratio:
###Code
like1 = 0.01
like2 = 2 * 0.6 * 0.01
likelihood_ratio = like1 / like2
likelihood_ratio
###Output
_____no_output_____
###Markdown
Since the likelihood ratio is less than 1, the blood tests are evidence *against* the hypothesis that Oliver left blood at the scene. But it is weak evidence. For example, if the prior odds were 1 (that is, 50% probability), the posterior odds would be 0.83, which corresponds to a probability of 45%:
###Code
post_odds = 1 * like1 / like2
prob(post_odds)
###Output
_____no_output_____
###Markdown
So this evidence doesn't "move the needle" very much. This example is a little contrived, but it demonstrates the counterintuitive result that data *consistent* with a hypothesis are not necessarily *in favor of* the hypothesis. If this result still bothers you, this way of thinking might help: the data consist of a common event, type 'O' blood, and a rare event, type 'AB' blood. If Oliver accounts for the common event, that leaves the rare event unexplained. If Oliver doesn't account for the 'O' blood, we have two chances to find someone in the population with 'AB' blood. And that factor of two makes the difference. **Exercise:** Suppose other evidence made you 90% confident of Oliver's guilt. How much would this exculpatory evidence change your beliefs? What if you initially thought there was only a 10% chance of his guilt?
###Code
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
Addends The second half of this chapter is about distributions of sums and results of other operations. We'll start with a forward problem, where we are given the inputs and compute the distribution of the output. Then we'll work on inverse problems, where we are given the outputs and we compute the distribution of the inputs. As a first example, suppose you roll two dice and add them up. What is the distribution of the sum? I'll use the following function to create a `Pmf` that represents the possible outcomes of a die:
###Code
import numpy as np
from empiricaldist import Pmf
def make_die(sides):
outcomes = np.arange(1, sides+1)
die = Pmf(1/sides, outcomes)
return die
###Output
_____no_output_____
###Markdown
On a six-sided die, the outcomes are 1 through 6, all equally likely.
###Code
die = make_die(6)
from utils import decorate
die.bar(alpha=0.4)
decorate(xlabel='Outcome',
ylabel='PMF')
###Output
_____no_output_____
###Markdown
If we roll two dice and add them up, there are 11 possible outcomes, 2 through 12, but they are not equally likely. To compute the distribution of the sum, we have to enumerate the possible outcomes. And that's how this function works:
###Code
def add_dist(pmf1, pmf2):
"""Compute the distribution of a sum."""
res = Pmf()
for q1, p1 in pmf1.items():
for q2, p2 in pmf2.items():
q = q1 + q2
p = p1 * p2
res[q] = res(q) + p
return res
###Output
_____no_output_____
###Markdown
The parameters are `Pmf` objects representing distributions. The loops iterate through the quantities and probabilities in the `Pmf` objects. Each time through the loop `q` gets the sum of a pair of quantities, and `p` gets the probability of the pair. Because the same sum might appear more than once, we have to add up the total probability for each sum. Notice a subtle element of this line:``` res[q] = res(q) + p```I use parentheses on the right side of the assignment, which returns 0 if `q` does not appear yet in `res`. I use brackets on the left side of the assignment to create or update an element in `res`; using parentheses on the left side would not work. `Pmf` provides `add_dist`, which does the same thing. You can call it as a method, like this:
###Code
twice = die.add_dist(die)
###Output
_____no_output_____
###Markdown
Or as a function, like this:
###Code
twice = Pmf.add_dist(die, die)
###Output
_____no_output_____
###Markdown
And here's what the result looks like:
###Code
from utils import decorate
def decorate_dice(title=''):
decorate(xlabel='Outcome',
ylabel='PMF',
title=title)
twice = add_dist(die, die)
twice.bar(color='C1', alpha=0.5)
decorate_dice()
###Output
_____no_output_____
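###Markdown
To see the subtlety mentioned above in isolation (my addition): brackets on the left side create or update an element, while parentheses on the right side return 0 for a quantity that is not there yet.
###Code
res = Pmf()
res[2] = 0.5 # brackets create the element
res(2), res(7) # parentheses return 0 for the missing quantity 7
###Output
_____no_output_____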
###Markdown
If we have a sequence of `Pmf` objects that represent dice, we can compute the distribution of the sum like this:
###Code
def add_dist_seq(seq):
"""Compute Pmf of the sum of values from seq."""
total = seq[0]
for other in seq[1:]:
total = total.add_dist(other)
return total
###Output
_____no_output_____
###Markdown
As an example, we can make a list of three dice like this:
###Code
dice = [die] * 3
###Output
_____no_output_____
###Markdown
And we can compute the distribution of their sum like this.
###Code
thrice = add_dist_seq(dice)
###Output
_____no_output_____
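###Markdown
As a quick check (my addition): the mean of the sum of three fair dice should be 3 times 3.5, or 10.5.
###Code
thrice.mean()
###Output
_____no_output_____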
###Markdown
The following figure shows what these three distributions look like:- The distribution of a single die is uniform from 1 to 6.- The sum of two dice has a triangle distribution between 2 and 12.- The sum of three dice has a bell-shaped distribution between 3 and 18.
###Code
import matplotlib.pyplot as plt
die.plot(label='once')
twice.plot(label='twice')
thrice.plot(label='thrice')
plt.xticks([0,3,6,9,12,15,18])
decorate_dice(title='Distributions of sums')
###Output
_____no_output_____
###Markdown
As an aside, this example demonstrates the Central Limit Theorem, which says that the distribution of a sum converges on a bell-shaped normal distribution, at least under some conditions. Gluten sensitivity In 2015 I read a paper that tested whether people diagnosed with gluten sensitivity (but not celiac disease) were able to distinguish gluten flour from non-gluten flour in a blind challenge ([you can read the paper here](https://onlinelibrary.wiley.com/doi/full/10.1111/apt.13372)). Out of 35 subjects, 12 correctly identified the gluten flour based on resumption of symptoms while they were eating it. Another 17 wrongly identified the gluten-free flour based on their symptoms, and 6 were unable to distinguish. The authors conclude, "Double-blind gluten challenge induces symptom recurrence in just one-third of patients." This conclusion seems odd to me, because if none of the patients were sensitive to gluten, we would expect some of them to identify the gluten flour by chance. So here's the question: based on this data, how many of the subjects are sensitive to gluten and how many are guessing? We can use Bayes's Theorem to answer this question, but first we have to make some modeling decisions. I'll assume: - People who are sensitive to gluten have a 95% chance of correctly identifying gluten flour under the challenge conditions, and - People who are not sensitive have a 40% chance of identifying the gluten flour by chance (and a 60% chance of either choosing the other flour or failing to distinguish). These particular values are arbitrary, but the results are not sensitive to these choices. I will solve this problem in two steps. First, assuming that we know how many subjects are sensitive, I will compute the distribution of the data. Then, using the likelihood of the data, I will compute the posterior distribution of the number of sensitive patients. The first is the **forward problem**; the second is the **inverse problem**. The forward problem Suppose we know that 10 of the 35 subjects are sensitive to gluten. That means that 25 are not:
###Code
n = 35
n_sensitive = 10
n_insensitive = n - n_sensitive
###Output
_____no_output_____
###Markdown
Each sensitive subject has a 95% chance of identifying the gluten flour, so the number of correct identifications follows a binomial distribution. I'll use `make_binomial`, which we defined in Section xx, to make a `Pmf` that represents the binomial distribution.
###Code
from utils import make_binomial
dist_sensitive = make_binomial(n_sensitive, 0.95)
dist_insensitive = make_binomial(n_insensitive, 0.40)
###Output
_____no_output_____
###Markdown
The results are the distributions for the number of correct identifications in each group. Now we can use `add_dist` to compute the total number of correct identifications:
###Code
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
###Output
_____no_output_____
###Markdown
Here are the results:
###Code
dist_sensitive.plot(label='sensitive', linestyle='dashed')
dist_insensitive.plot(label='insensitive', linestyle='dashed')
dist_total.plot(label='total')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
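###Markdown
A quick check of the expected total (my addition): with 10 sensitive and 25 insensitive subjects, the mean should be 10(0.95) + 25(0.4) = 19.5.
###Code
dist_total.mean(), 10*0.95 + 25*0.4
###Output
_____no_output_____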
###Markdown
We expect most of the sensitive subjects to identify the gluten flour correctly. Of the 25 insensitive subjects, we expect about 10 to identify the gluten flour by chance. So we expect about 20 correct identifications in total. This is the answer to the forward problem: given the number of sensitive subjects, we can compute the distribution of the data. The inverse problem Now let's solve the inverse problem: given the data, we'll compute the posterior distribution of the number of sensitive subjects. Here's how. I'll loop through the possible values of `n_sensitive` and compute the distribution of the data for each:
###Code
import pandas as pd
table = pd.DataFrame()
for n_sensitive in range(0, n+1):
n_insensitive = n - n_sensitive
dist_sensitive = make_binomial(n_sensitive, 0.95)
dist_insensitive = make_binomial(n_insensitive, 0.4)
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
table[n_sensitive] = dist_total
###Output
_____no_output_____
###Markdown
The loop enumerates the possible values of `n_sensitive`. For each value, it computes the distribution of the total number of correct identifications, and stores the result as a column in a Pandas `DataFrame`.
###Code
table.head(3)
###Output
_____no_output_____
###Markdown
The following figure shows selected columns from the `DataFrame`, corresponding to different hypothetical values of `n_sensitive`:
###Code
table[0].plot(label='n_sensitive = 0')
table[10].plot(label='n_sensitive = 10')
table[20].plot(label='n_sensitive = 20', linestyle='dashed')
table[30].plot(label='n_sensitive = 30', linestyle='dotted')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
###Markdown
Now we can use this table to compute the likelihood of the data:
###Code
likelihood1 = table.loc[12]
###Output
_____no_output_____
###Markdown
`loc` selects a row from the `DataFrame`. The row with index 12 contains the probability of 12 correct identifications for each hypothetical value of `n_sensitive`. And that's exactly the likelihood we need to do a Bayesian update. I'll use a uniform prior, which implies that I would be equally surprised by any value of `n_sensitive`:
###Code
hypos = np.arange(n+1)
prior = Pmf(1, hypos)
###Output
_____no_output_____
###Markdown
And here's the update:
###Code
posterior1 = prior * likelihood1
posterior1.normalize()
###Output
_____no_output_____
###Markdown
For comparison, I also compute the posterior for another possible outcome, 20 correct identifications.
###Code
likelihood2 = table.loc[20]
posterior2 = prior * likelihood2
posterior2.normalize()
###Output
_____no_output_____
###Markdown
The following figure shows posterior distributions of `n_sensitive` based on the actual data, 12 correct identifications, and the other possible outcome, 20 correct identifications.
###Code
posterior1.plot(label='posterior with 12 correct')
posterior2.plot(label='posterior with 20 correct')
decorate(xlabel='Number of sensitive subjects',
ylabel='PMF',
title='Posterior distributions')
###Output
_____no_output_____
###Markdown
With 12 correct identifications, the most likely conclusion is that none of the subjects are sensitive to gluten. If there had been 20 correct identifications, the most likely conclusion would be that 11-12 of the subjects were sensitive.
###Code
posterior1.max_prob()
posterior2.max_prob()
###Output
_____no_output_____
###Markdown
Summary This chapter presents two topics that are almost unrelated except that they make the title of the chapter catchy. The first part of the chapter is about Bayes's Rule, evidence, and how we can quantify the strength of evidence using a likelihood ratio or Bayes factor. The second part is about functions that compute the distribution of a sum, product, or the result of another binary operation. We can use these functions to solve forward problems and inverse problems; that is, given the parameters of a system, we can compute the distribution of the data or, given the data, we can compute the distribution of the parameters. In the next chapter, we'll compute distributions for minimums and maximums, and use them to solve more Bayesian problems. But first you might want to work on these exercises. Exercises **Exercise:** Let's use Bayes's Rule to solve the Elvis problem from an earlier chapter:> Elvis Presley had a twin brother who died at birth. What is the probability that Elvis was an identical twin? In 1935, about 2/3 of twins were fraternal and 1/3 were identical. The question contains two pieces of information we can use to update this prior. * First, Elvis's twin was also male, which is more likely if they were identical twins, with a likelihood ratio of 2. * Also, Elvis's twin died at birth, which is more likely if they were identical twins, with a likelihood ratio of 1.25. If you are curious about where those numbers come from, I wrote [a blog post about it](https://www.allendowney.com/blog/2020/01/28/the-elvis-problem-revisited).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
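###Markdown
One possible solution sketch (not the official solution): the prior odds that Elvis was an identical twin are $(1/3)/(2/3) = 1/2$, and each piece of information multiplies the odds by its likelihood ratio.
###Code
# a possible solution sketch, using the odds form of Bayes's Theorem
prior_odds = (1/3) / (2/3)
post_odds = prior_odds * 2 * 1.25
prob(post_odds)   # about 0.56
###Output
_____no_output_____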
###Markdown
**Exercise:** The following is an [interview question that appeared on glassdoor.com](https://www.glassdoor.com/Interview/You-re-about-to-get-on-a-plane-to-Seattle-You-want-to-know-if-you-should-bring-an-umbrella-You-call-3-random-friends-of-y-QTN_519262.htm), attributed to Facebook:> You're about to get on a plane to Seattle. You want to know if you should bring an umbrella. You call 3 random friends of yours who live there and ask each independently if it's raining. Each of your friends has a 2/3 chance of telling you the truth and a 1/3 chance of messing with you by lying. All 3 friends tell you that "Yes" it is raining. What is the probability that it's actually raining in Seattle? Use Bayes's Rule to solve this problem. As a prior you can assume that it rains in Seattle about 10% of the time. This question causes some confusion about the differences between Bayesian and frequentist interpretations of probability; if you are curious about this point, [I wrote a blog article about it](http://allendowney.blogspot.com/2016/09/bayess-theorem-is-not-optional.html).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
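###Markdown
One possible solution sketch (not the official solution): each independent "yes" multiplies the odds by the same likelihood ratio.
###Code
# a possible solution sketch, assuming the 10% prior suggested above
prior_odds = odds(0.1)
likelihood_ratio = (2/3) / (1/3)   # each "yes" is twice as likely if it's raining
post_odds = prior_odds * likelihood_ratio**3
prob(post_odds)   # 8/17, about 0.47
###Output
_____no_output_____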
###Markdown
**Exercise:** [According to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/health_effects/effects_cig_smoking), people who smoke are about 25 times more likely to develop lung cancer than nonsmokers. [Also according to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/adult_data/cig_smoking/index.htm), about 14\% of adults in the U.S. are smokers. If you learn that someone has lung cancer, what is the probability they are a smoker?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
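###Markdown
One possible solution sketch (not the official solution): treat "has lung cancer" as evidence with a likelihood ratio of 25.
###Code
# a possible solution sketch, using the CDC numbers quoted above
prior_odds = odds(0.14)
post_odds = prior_odds * 25
prob(post_odds)   # about 0.80
###Output
_____no_output_____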
###Markdown
**Exercise:** In *Dungeons & Dragons*, the amount of damage a goblin can withstand is the sum of two six-sided dice. The amount of damage you inflict with a short sword is determined by rolling one six-sided die. A goblin is defeated if the total damage you inflict is greater than or equal to the amount it can withstand. Suppose you are fighting a goblin and you have already inflicted 3 points of damage. What is your probability of defeating the goblin with your next successful attack? Hint: You can use `Pmf.add_dist` to add a constant amount, like 3, to a `Pmf` and `Pmf.sub_dist` to compute the distribution of remaining points.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** Suppose I have a box with a 6-sided die, an 8-sided die, and a 12-sided die. I choose one of the dice at random, roll it twice, multiply the outcomes, and report that the product is 12. What is the probability that I chose the 8-sided die? Hint: `Pmf` provides a function called `mul_dist` that takes two `Pmf` objects and returns a `Pmf` that represents the distribution of the product.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** *Betrayal at House on the Hill* is a strategy game in which characters with different attributes explore a haunted house. Depending on their attributes, the characters roll different numbers of dice. For example, if attempting a task that depends on knowledge, Professor Longfellow rolls 5 dice, Madame Zostra rolls 4, and Ox Bellows rolls 3. Each die yields 0, 1, or 2 with equal probability. If a randomly chosen character attempts a task three times and rolls a total of 3 on the first attempt, 4 on the second, and 5 on the third, which character do you think it was?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** There are 538 members of the United States Congress. Suppose we audit their investment portfolios and find that 312 of them out-perform the market. Let's assume that an honest member of Congress has only a 50% chance of out-performing the market, but a dishonest member who trades on inside information has a 90% chance. How many members of Congress are honest?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
Modeling and Simulation in Python Chapter 6 Copyright 2017 Allen Downey License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
p_0 = get_first_value(census)
p_end = get_last_value(census)
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
###Output
_____no_output_____
###Markdown
System objects We can rewrite the code from the previous chapter using system objects.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
annual_growth=annual_growth)
###Output
_____no_output_____
###Markdown
And we can encapsulate the code that runs the model in a function.
###Code
def run_simulation1(system):
"""Runs the constant growth model.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = results[t] + system.annual_growth
return results
###Output
_____no_output_____
###Markdown
We can also encapsulate the code that plots the results.
###Code
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
###Output
_____no_output_____
###Markdown
Here's how we run it.
###Code
results = run_simulation1(system)
plot_results(census, un, results, 'Constant growth model')
###Output
_____no_output_____
###Markdown
Proportional growth Here's a more realistic model where the number of births and deaths is proportional to the current population.
###Code
def run_simulation2(system):
"""Run a model with proportional birth and death.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
births = system.birth_rate * results[t]
deaths = system.death_rate * results[t]
results[t+1] = results[t] + births - deaths
return results
###Output
_____no_output_____
###Markdown
I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data.
###Code
system.death_rate = 0.01
system.birth_rate = 0.027
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
results = run_simulation2(system)
plot_results(census, un, results, 'Proportional model')
savefig('figs/chap06-fig01.pdf')
###Output
_____no_output_____
###Markdown
The model fits the data pretty well for the first 20 years, but not so well after that. Factoring out the update function `run_simulation1` and `run_simulation2` are nearly identical except for the body of the loop. So we can factor that part out into a function.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
###Output
_____no_output_____
###Markdown
The name `update_func` refers to a function object.
###Code
update_func1
###Output
_____no_output_____
###Markdown
Which we can confirm by checking its type.
###Code
type(update_func1)
###Output
_____no_output_____
###Markdown
`run_simulation` takes the update function as a parameter and calls it just like any other function.
###Code
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = update_func(results[t], t, system)
return results
###Output
_____no_output_____
###Markdown
Here's how we use it.
###Code
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = census[t_0]
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
birth_rate=0.027,
death_rate=0.01)
results = run_simulation(system, update_func1)
plot_results(census, un, results, 'Proportional model, factored')
###Output
_____no_output_____
###Markdown
Remember not to put parentheses after `update_func1`. What happens if you try? **Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again. Combining birth and death Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate.
###Code
def update_func2(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
net_growth = system.alpha * pop
return pop + net_growth
###Output
_____no_output_____
###Markdown
Here's how it works:
###Code
system.alpha = system.birth_rate - system.death_rate
results = run_simulation(system, update_func2)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Exercises **Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice). Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement. Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can.
###Code
# Solution goes here
# Solution goes here
###Output
_____no_output_____
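###Markdown
One possible solution sketch (not the official solution); the growth rates below are guesses to be adjusted by hand.
###Code
def update_func3(pop, t, system):
    """Compute the population next year, using alpha1 before 1980
    and alpha2 afterward.
    """
    if t < 1980:
        return pop + system.alpha1 * pop
    else:
        return pop + system.alpha2 * pop
system = System(t_0=t_0, t_end=t_end, p_0=p_0,
                alpha1=0.019, alpha2=0.015)
update_func3(100, 1980, system)   # quick direct test of the update function
results = run_simulation(system, update_func3)
plot_results(census, un, results, 'Proportional model, two growth rates')
###Output
_____no_output_____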
###Markdown
Odds and Addends Think Bayes, Second Edition Copyright 2020 Allen B. Downey License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
###Code
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py')
from utils import set_pyplot_params
set_pyplot_params()
###Output
_____no_output_____
###Markdown
This chapter presents a new way to represent a degree of certainty, **odds**, and a new form of Bayes's Theorem, called **Bayes's Rule**. Bayes's Rule is convenient if you want to do a Bayesian update on paper or in your head. It also sheds light on the important idea of **evidence** and how we can quantify the strength of evidence. The second part of the chapter is about "addends", that is, quantities being added, and how we can compute their distributions. We'll define functions that compute the distribution of sums, differences, products, and other operations. Then we'll use those distributions as part of a Bayesian update. Odds One way to represent a probability is with a number between 0 and 1, but that's not the only way. If you have ever bet on a football game or a horse race, you have probably encountered another representation of probability, called **odds**. You might have heard expressions like "the odds are three to one", but you might not know what that means. The **odds in favor** of an event are the ratio of the probability it will occur to the probability that it will not. The following function does this calculation.
###Code
def odds(p):
return p / (1-p)
###Output
_____no_output_____
###Markdown
For example, if my team has a 75% chance of winning, the odds in their favor are three to one, because the chance of winning is three times the chance of losing.
###Code
odds(0.75)
###Output
_____no_output_____
###Markdown
You can write odds in decimal form, but it is also common to write them as a ratio of integers. So "three to one" is sometimes written $3:1$. When probabilities are low, it is more common to report the **odds against** rather than the odds in favor. For example, if my horse has a 10% chance of winning, the odds in favor are $1:9$.
###Code
odds(0.1)
###Output
_____no_output_____
###Markdown
But in that case it would be more common to say that the odds against are $9:1$.
###Code
odds(0.9)
###Output
_____no_output_____
###Markdown
Given the odds in favor, in decimal form, you can convert to probability like this:
###Code
def prob(o):
return o / (o+1)
###Output
_____no_output_____
###Markdown
For example, if the odds are $3/2$, the corresponding probability is $3/5$:
###Code
prob(3/2)
###Output
_____no_output_____
###Markdown
Or if you represent odds with a numerator and denominator, you can convert to probability like this:
###Code
def prob2(yes, no):
return yes / (yes + no)
prob2(3, 2)
###Output
_____no_output_____
###Markdown
Probabilities and odds are different representations of the same information; given either one, you can compute the other. But some computations are easier when we work with odds, as we'll see in the next section, and some computations are even easier with log odds, which we'll see later. Bayes's Rule So far we have worked with Bayes's theorem in the "probability form": $$P(H|D) = \frac{P(H)~P(D|H)}{P(D)}$$ Writing $\mathrm{odds}(A)$ for odds in favor of $A$, we can express Bayes's Theorem in "odds form": $$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$ This is Bayes's Rule, which says that the posterior odds are the prior odds times the likelihood ratio. Bayes's Rule is convenient for computing a Bayesian update on paper or in your head. For example, let's go back to the cookie problem:> Suppose there are two bowls of cookies. Bowl 1 contains 30 vanilla cookies and 10 chocolate cookies. Bowl 2 contains 20 of each. Now suppose you choose one of the bowls at random and, without looking, select a cookie at random. The cookie is vanilla. What is the probability that it came from Bowl 1? The prior probability is 50%, so the prior odds are 1. The likelihood ratio is $\frac{3}{4} / \frac{1}{2}$, or $3/2$. So the posterior odds are $3/2$, which corresponds to probability $3/5$.
###Code
prior_odds = 1
likelihood_ratio = (3/4) / (1/2)
post_odds = prior_odds * likelihood_ratio
post_odds
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
If we draw another cookie and it's chocolate, we can do another update:
###Code
likelihood_ratio = (1/4) / (1/2)
post_odds *= likelihood_ratio
post_odds
###Output
_____no_output_____
###Markdown
And convert back to probability.
###Code
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
Oliver's Blood I’ll use Bayes’s Rule to solve another problem from MacKay’s [*Information Theory, Inference, and Learning Algorithms*](https://www.inference.org.uk/mackay/itila/):> Two people have left traces of their own blood at the scene of a crime. A suspect, Oliver, is tested and found to have type ‘O’ blood. The blood groups of the two traces are found to be of type ‘O’ (a common type in the local population, having frequency 60%) and of type ‘AB’ (a rare type, with frequency 1%). Do these data \[the traces found at the scene\] give evidence in favor of the proposition that Oliver was one of the people \[who left blood at the scene\]? To answer this question, we need to think about what it means for data to give evidence in favor of (or against) a hypothesis. Intuitively, we might say that data favor a hypothesis if the hypothesis is more likely in light of the data than it was before. In the cookie problem, the prior odds are 1, which corresponds to probability 50%. The posterior odds are $3/2$, or probability 60%. So the vanilla cookie is evidence in favor of Bowl 1. Bayes's Rule provides a way to make this intuition more precise. Again $$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$ Dividing through by $\mathrm{odds}(A)$, we get: $$\frac{\mathrm{odds}(A|D)}{\mathrm{odds}(A)} = \frac{P(D|A)}{P(D|B)}$$ The term on the left is the ratio of the posterior and prior odds. The term on the right is the likelihood ratio, also called the **Bayes factor**. If the Bayes factor is greater than 1, that means that the data were more likely under $A$ than under $B$. And that means that the odds are greater, in light of the data, than they were before. If the Bayes factor is less than 1, that means the data were less likely under $A$ than under $B$, so the odds in favor of $A$ go down. Finally, if the Bayes factor is exactly 1, the data are equally likely under either hypothesis, so the odds do not change. Let's apply that to the problem at hand. If Oliver is one of the people who left blood at the crime scene, he accounts for the ‘O’ sample; in that case, the probability of the data is the probability that a random member of the population has type ‘AB’ blood, which is 1%. If Oliver did not leave blood at the scene, we have two samples to account for. If we choose two random people from the population, what is the chance of finding one with type ‘O’ and one with type ‘AB’? Well, there are two ways it might happen: * The first person might have ‘O’ and the second ‘AB’, * Or the first person might have ‘AB’ and the second ‘O’. The probability of either combination is $(0.6) (0.01)$, which is 0.6%, so the total probability is twice that, or 1.2%. So the data are a little more likely if Oliver is *not* one of the people who left blood at the scene. We can use these probabilities to compute the likelihood ratio:
###Code
like1 = 0.01
like2 = 2 * 0.6 * 0.01
likelihood_ratio = like1 / like2
likelihood_ratio
###Output
_____no_output_____
###Markdown
Since the likelihood ratio is less than 1, the blood tests are evidence *against* the hypothesis that Oliver left blood at the scene. But it is weak evidence. For example, if the prior odds were 1 (that is, 50% probability), the posterior odds would be 0.83, which corresponds to a probability of 45%:
###Code
post_odds = 1 * like1 / like2
prob(post_odds)
###Output
_____no_output_____
###Markdown
So this evidence doesn't "move the needle" very much. This example is a little contrived, but it demonstrates the counterintuitive result that data *consistent* with a hypothesis are not necessarily *in favor of* the hypothesis. If this result still bothers you, this way of thinking might help: the data consist of a common event, type ‘O’ blood, and a rare event, type ‘AB’ blood. If Oliver accounts for the common event, that leaves the rare event unexplained. If Oliver doesn’t account for the ‘O’ blood, we have two chances to find someone in the population with ‘AB’ blood. And that factor of two makes the difference. **Exercise:** Suppose that based on other evidence, your prior belief in Oliver's guilt is 90%. How much would the blood evidence in this section change your beliefs? What if you initially thought there was only a 10% chance of his guilt?
###Code
# Solution goes here
# Solution goes here
###Output
_____no_output_____
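###Markdown
One possible solution sketch (not the official solution), reusing `likelihood_ratio` from the computation above.
###Code
# a possible solution sketch: apply the same Bayes factor to each prior
for prior_prob in [0.9, 0.1]:
    post_odds = odds(prior_prob) * likelihood_ratio
    print(prior_prob, prob(post_odds))
# with a 90% prior the posterior is about 0.88; with a 10% prior, about 0.085
###Output
_____no_output_____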
###Markdown
Addends The second half of this chapter is about distributions of sums and results of other operations. We'll start with a forward problem, where we are given the inputs and compute the distribution of the output. Then we'll work on inverse problems, where we are given the outputs and we compute the distribution of the inputs. As a first example, suppose you roll two dice and add them up. What is the distribution of the sum? I’ll use the following function to create a `Pmf` that represents the possible outcomes of a die:
###Code
import numpy as np
from empiricaldist import Pmf
def make_die(sides):
outcomes = np.arange(1, sides+1)
die = Pmf(1/sides, outcomes)
return die
###Output
_____no_output_____
###Markdown
On a six-sided die, the outcomes are 1 through 6, all equally likely.
###Code
die = make_die(6)
from utils import decorate
die.bar(alpha=0.4)
decorate(xlabel='Outcome',
ylabel='PMF')
###Output
_____no_output_____
###Markdown
If we roll two dice and add them up, there are 11 possible outcomes, 2 through 12, but they are not equally likely. To compute the distribution of the sum, we have to enumerate the possible outcomes. And that's how this function works:
###Code
def add_dist(pmf1, pmf2):
"""Compute the distribution of a sum."""
res = Pmf()
for q1, p1 in pmf1.items():
for q2, p2 in pmf2.items():
q = q1 + q2
p = p1 * p2
res[q] = res(q) + p
return res
###Output
_____no_output_____
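###Markdown
As a quick aside of mine (not from the book), here is the `Pmf` indexing detail the function above relies on: calling a `Pmf` like a function returns 0 for a quantity it does not contain, while brackets create or update an entry.
###Code
# a quick demonstration of the Pmf indexing detail used in add_dist
res = Pmf()
res(5)         # calling a Pmf returns 0 for a missing quantity
res[5] = 0.5   # brackets create or update an entry
res(5)         # now returns 0.5
###Output
_____no_output_____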
###Markdown
The parameters are `Pmf` objects representing distributions. The loops iterate through the quantities and probabilities in the `Pmf` objects. Each time through the loop `q` gets the sum of a pair of quantities, and `p` gets the probability of the pair. Because the same sum might appear more than once, we have to add up the total probability for each sum. Notice a subtle element of this line:``` res[q] = res(q) + p```I use parentheses on the right side of the assignment, which returns 0 if `q` does not appear yet in `res`. I use brackets on the left side of the assignment to create or update an element in `res`; using parentheses on the left side would not work. `Pmf` provides `add_dist`, which does the same thing. You can call it as a method, like this:
###Code
twice = die.add_dist(die)
###Output
_____no_output_____
###Markdown
Or as a function, like this:
###Code
twice = Pmf.add_dist(die, die)
###Output
_____no_output_____
###Markdown
And here's what the result looks like:
###Code
from utils import decorate
def decorate_dice(title=''):
decorate(xlabel='Outcome',
ylabel='PMF',
title=title)
twice = add_dist(die, die)
twice.bar(color='C1', alpha=0.5)
decorate_dice()
###Output
_____no_output_____
###Markdown
If we have a sequence of `Pmf` objects that represent dice, we can compute the distribution of the sum like this:
###Code
def add_dist_seq(seq):
"""Compute Pmf of the sum of values from seq."""
total = seq[0]
for other in seq[1:]:
total = total.add_dist(other)
return total
###Output
_____no_output_____
###Markdown
As an example, we can make a list of three dice like this:
###Code
dice = [die] * 3
###Output
_____no_output_____
###Markdown
And we can compute the distribution of their sum like this.
###Code
thrice = add_dist_seq(dice)
###Output
_____no_output_____
###Markdown
The following figure shows what these three distributions look like: - The distribution of a single die is uniform from 1 to 6. - The sum of two dice has a triangle distribution between 2 and 12. - The sum of three dice has a bell-shaped distribution between 3 and 18.
###Code
import matplotlib.pyplot as plt
die.plot(label='once')
twice.plot(label='twice', style='--')
thrice.plot(label='thrice', style=':')
plt.xticks([0,3,6,9,12,15,18])
decorate_dice(title='Distributions of sums')
###Output
_____no_output_____
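###Markdown
As a quick aside of mine (not from the book), we can compare the sum of three dice to a normal distribution with the same mean and standard deviation; the Central Limit Theorem, discussed next, explains the resemblance.
###Code
from scipy.stats import norm
# a quick sketch: normal approximation to the sum of three dice
mean = 3 * 3.5               # the mean of one die is 3.5
std = np.sqrt(3 * 35 / 12)   # the variance of one fair die is 35/12
qs = thrice.qs
thrice.plot(label='sum of three dice', style=':')
plt.plot(qs, norm.pdf(qs, mean, std), label='normal approximation')
decorate_dice(title='Normal approximation to the sum of three dice')
###Output
_____no_output_____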
###Markdown
As an aside, this example demonstrates the Central Limit Theorem, which says that the distribution of a sum converges on a bell-shaped normal distribution, at least under some conditions. Gluten Sensitivity In 2015 I read a paper that tested whether people diagnosed with gluten sensitivity (but not celiac disease) were able to distinguish gluten flour from non-gluten flour in a blind challenge ([you can read the paper here](https://onlinelibrary.wiley.com/doi/full/10.1111/apt.13372)). Out of 35 subjects, 12 correctly identified the gluten flour based on resumption of symptoms while they were eating it. Another 17 wrongly identified the gluten-free flour based on their symptoms, and 6 were unable to distinguish. The authors conclude, "Double-blind gluten challenge induces symptom recurrence in just one-third of patients." This conclusion seems odd to me, because if none of the patients were sensitive to gluten, we would expect some of them to identify the gluten flour by chance. So here's the question: based on this data, how many of the subjects are sensitive to gluten and how many are guessing? We can use Bayes's Theorem to answer this question, but first we have to make some modeling decisions. I'll assume: - People who are sensitive to gluten have a 95% chance of correctly identifying gluten flour under the challenge conditions, and - People who are not sensitive have a 40% chance of identifying the gluten flour by chance (and a 60% chance of either choosing the other flour or failing to distinguish). These particular values are arbitrary, but the results are not sensitive to these choices. I will solve this problem in two steps. First, assuming that we know how many subjects are sensitive, I will compute the distribution of the data. Then, using the likelihood of the data, I will compute the posterior distribution of the number of sensitive patients. The first is the **forward problem**; the second is the **inverse problem**. The Forward Problem Suppose we know that 10 of the 35 subjects are sensitive to gluten. That means that 25 are not:
###Code
n = 35
num_sensitive = 10
num_insensitive = n - num_sensitive
###Output
_____no_output_____
###Markdown
Each sensitive subject has a 95% chance of identifying the gluten flour, so the number of correct identifications follows a binomial distribution. I'll use `make_binomial`, which we defined in a previous chapter, to make a `Pmf` that represents the binomial distribution.
###Code
from utils import make_binomial
dist_sensitive = make_binomial(num_sensitive, 0.95)
dist_insensitive = make_binomial(num_insensitive, 0.40)
###Output
_____no_output_____
###Markdown
The results are the distributions for the number of correct identifications in each group. Now we can use `add_dist` to compute the distribution of the total number of correct identifications:
###Code
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
###Output
_____no_output_____
###Markdown
Here are the results:
###Code
dist_sensitive.plot(label='sensitive', style=':')
dist_insensitive.plot(label='insensitive', style='--')
dist_total.plot(label='total')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
###Markdown
We expect most of the sensitive subjects to identify the gluten flour correctly. Of the 25 insensitive subjects, we expect about 10 to identify the gluten flour by chance. So we expect about 20 correct identifications in total. This is the answer to the forward problem: given the number of sensitive subjects, we can compute the distribution of the data. The Inverse Problem Now let's solve the inverse problem: given the data, we'll compute the posterior distribution of the number of sensitive subjects. Here's how. I'll loop through the possible values of `num_sensitive` and compute the distribution of the data for each:
###Code
import pandas as pd
table = pd.DataFrame()
for num_sensitive in range(0, n+1):
num_insensitive = n - num_sensitive
dist_sensitive = make_binomial(num_sensitive, 0.95)
dist_insensitive = make_binomial(num_insensitive, 0.4)
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
table[num_sensitive] = dist_total
###Output
_____no_output_____
###Markdown
The loop enumerates the possible values of `num_sensitive`. For each value, it computes the distribution of the total number of correct identifications, and stores the result as a column in a Pandas `DataFrame`.
###Code
table.head(3)
###Output
_____no_output_____
###Markdown
The following figure shows selected columns from the `DataFrame`, corresponding to different hypothetical values of `num_sensitive`:
###Code
table[0].plot(label='num_sensitive = 0')
table[10].plot(label='num_sensitive = 10')
table[20].plot(label='num_sensitive = 20', style='--')
table[30].plot(label='num_sensitive = 30', style=':')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
###Markdown
Now we can use this table to compute the likelihood of the data:
###Code
likelihood1 = table.loc[12]
###Output
_____no_output_____
###Markdown
`loc` selects a row from the `DataFrame`. The row with index 12 contains the probability of 12 correct identifications for each hypothetical value of `num_sensitive`. And that's exactly the likelihood we need to do a Bayesian update. I'll use a uniform prior, which implies that I would be equally surprised by any value of `num_sensitive`:
###Code
hypos = np.arange(n+1)
prior = Pmf(1, hypos)
###Output
_____no_output_____
###Markdown
And here's the update:
###Code
posterior1 = prior * likelihood1
posterior1.normalize()
###Output
_____no_output_____
###Markdown
For comparison, I also compute the posterior for another possible outcome, 20 correct identifications.
###Code
likelihood2 = table.loc[20]
posterior2 = prior * likelihood2
posterior2.normalize()
###Output
_____no_output_____
###Markdown
The following figure shows posterior distributions of `num_sensitive` based on the actual data, 12 correct identifications, and the other possible outcome, 20 correct identifications.
###Code
posterior1.plot(label='posterior with 12 correct', color='C4')
posterior2.plot(label='posterior with 20 correct', color='C1')
decorate(xlabel='Number of sensitive subjects',
ylabel='PMF',
title='Posterior distributions')
###Output
_____no_output_____
###Markdown
With 12 correct identifications, the most likely conclusion is that none of the subjects are sensitive to gluten. If there had been 20 correct identifications, the most likely conclusion would be that 11-12 of the subjects were sensitive.
###Code
posterior1.max_prob()
posterior2.max_prob()
###Output
_____no_output_____
###Markdown
Summary This chapter presents two topics that are almost unrelated except that they make the title of the chapter catchy. The first part of the chapter is about Bayes's Rule, evidence, and how we can quantify the strength of evidence using a likelihood ratio or Bayes factor. The second part is about `add_dist`, which computes the distribution of a sum. We can use this function to solve forward and inverse problems; that is, given the parameters of a system, we can compute the distribution of the data or, given the data, we can compute the distribution of the parameters. In the next chapter, we'll compute distributions for minimums and maximums, and use them to solve more Bayesian problems. But first you might want to work on these exercises. Exercises **Exercise:** Let's use Bayes's Rule to solve the Elvis problem from an earlier chapter:> Elvis Presley had a twin brother who died at birth. What is the probability that Elvis was an identical twin? In 1935, about 2/3 of twins were fraternal and 1/3 were identical. The question contains two pieces of information we can use to update this prior. * First, Elvis's twin was also male, which is more likely if they were identical twins, with a likelihood ratio of 2. * Also, Elvis's twin died at birth, which is more likely if they were identical twins, with a likelihood ratio of 1.25. If you are curious about where those numbers come from, I wrote [a blog post about it](https://www.allendowney.com/blog/2020/01/28/the-elvis-problem-revisited).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** The following is an [interview question that appeared on glassdoor.com](https://www.glassdoor.com/Interview/You-re-about-to-get-on-a-plane-to-Seattle-You-want-to-know-if-you-should-bring-an-umbrella-You-call-3-random-friends-of-y-QTN_519262.htm), attributed to Facebook:> You're about to get on a plane to Seattle. You want to know if you should bring an umbrella. You call 3 random friends of yours who live there and ask each independently if it's raining. Each of your friends has a 2/3 chance of telling you the truth and a 1/3 chance of messing with you by lying. All 3 friends tell you that "Yes" it is raining. What is the probability that it's actually raining in Seattle? Use Bayes's Rule to solve this problem. As a prior you can assume that it rains in Seattle about 10% of the time. This question causes some confusion about the differences between Bayesian and frequentist interpretations of probability; if you are curious about this point, [I wrote a blog article about it](http://allendowney.blogspot.com/2016/09/bayess-theorem-is-not-optional.html).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** [According to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/health_effects/effects_cig_smoking), people who smoke are about 25 times more likely to develop lung cancer than nonsmokers. [Also according to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/adult_data/cig_smoking/index.htm), about 14\% of adults in the U.S. are smokers. If you learn that someone has lung cancer, what is the probability they are a smoker?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** In *Dungeons & Dragons*, the amount of damage a goblin can withstand is the sum of two six-sided dice. The amount of damage you inflict with a short sword is determined by rolling one six-sided die. A goblin is defeated if the total damage you inflict is greater than or equal to the amount it can withstand. Suppose you are fighting a goblin and you have already inflicted 3 points of damage. What is your probability of defeating the goblin with your next successful attack? Hint: You can use `Pmf.add_dist` to add a constant amount, like 3, to a `Pmf` and `Pmf.sub_dist` to compute the distribution of remaining points.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** Suppose I have a box with a 6-sided die, an 8-sided die, and a 12-sided die. I choose one of the dice at random, roll it twice, multiply the outcomes, and report that the product is 12. What is the probability that I chose the 8-sided die? Hint: `Pmf` provides a function called `mul_dist` that takes two `Pmf` objects and returns a `Pmf` that represents the distribution of the product.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
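###Markdown
One possible solution sketch (not the official solution): compute the distribution of the product for each die and use the probability of 12 as the likelihood.
###Code
# a possible solution sketch using mul_dist, as the hint suggests
hypos = [6, 8, 12]
prior = Pmf(1/3, hypos)
likelihood = [Pmf.mul_dist(make_die(sides), make_die(sides))(12)
              for sides in hypos]
posterior = prior * likelihood
posterior.normalize()
posterior   # the probability of the 8-sided die is about 0.29
###Output
_____no_output_____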
###Markdown
**Exercise:** *Betrayal at House on the Hill* is a strategy game in which characters with different attributes explore a haunted house. Depending on their attributes, the characters roll different numbers of dice. For example, if attempting a task that depends on knowledge, Professor Longfellow rolls 5 dice, Madame Zostra rolls 4, and Ox Bellows rolls 3. Each die yields 0, 1, or 2 with equal probability. If a randomly chosen character attempts a task three times and rolls a total of 3 on the first attempt, 4 on the second, and 5 on the third, which character do you think it was?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
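###Markdown
One possible solution sketch (not the official solution): treat the three totals as independent data and multiply their likelihoods for each character.
###Code
# a possible solution sketch, reusing add_dist_seq from earlier in the chapter
attr_die = Pmf(1/3, [0, 1, 2])   # each die yields 0, 1, or 2 with equal probability
hypos = ['Longfellow', 'Zostra', 'Bellows']
num_dice = [5, 4, 3]
prior = Pmf(1/3, hypos)
likelihood = []
for n_dice in num_dice:
    dist = add_dist_seq([attr_die] * n_dice)
    # the three attempts are independent, so multiply their probabilities
    likelihood.append(dist(3) * dist(4) * dist(5))
posterior = prior * likelihood
posterior.normalize()
posterior   # Madame Zostra comes out most likely
###Output
_____no_output_____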
###Markdown
**Exercise:** There are 538 members of the United States Congress. Suppose we audit their investment portfolios and find that 312 of them out-perform the market. Let's assume that an honest member of Congress has only a 50% chance of out-performing the market, but a dishonest member who trades on inside information has a 90% chance. How many members of Congress are honest?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
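###Markdown
One possible solution sketch (not the official solution), mirroring the gluten-sensitivity inverse problem.
###Code
# a possible solution sketch, following the inverse-problem pattern used above
n = 538
table = pd.DataFrame()
for num_honest in range(0, n+1):
    num_dishonest = n - num_honest
    dist_honest = make_binomial(num_honest, 0.5)
    dist_dishonest = make_binomial(num_dishonest, 0.9)
    table[num_honest] = Pmf.add_dist(dist_honest, dist_dishonest)
likelihood = table.loc[312]
prior = Pmf(1, np.arange(n+1))
posterior = prior * likelihood
posterior.normalize()
posterior.max_prob()   # the most plausible number of honest members
###Output
_____no_output_____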
###Markdown
Modeling and Simulation in Python Chapter 6 Copyright 2017 Allen Downey License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
p_0 = get_first_value(census)
p_end = get_last_value(census)
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
###Output
_____no_output_____
###Markdown
System objects We can rewrite the code from the previous chapter using system objects.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
annual_growth=annual_growth)
###Output
_____no_output_____
###Markdown
And we can encapsulate the code that runs the model in a function.
###Code
def run_simulation1(system):
"""Runs the constant growth model.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = results[t] + system.annual_growth
return results
###Output
_____no_output_____
###Markdown
We can also encapsulate the code that plots the results.
###Code
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
###Output
_____no_output_____
###Markdown
Here's how we run it.
###Code
results = run_simulation1(system)
plot_results(census, un, results, 'Constant growth model')
###Output
_____no_output_____
###Markdown
Proportional growth Here's a more realistic model where the number of births and deaths is proportional to the current population.
###Code
def run_simulation2(system):
"""Run a model with proportional birth and death.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
births = system.birth_rate * results[t]
deaths = system.death_rate * results[t]
results[t+1] = results[t] + births - deaths
return results
###Output
_____no_output_____
###Markdown
I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data.
###Code
system.death_rate = 0.01
system.birth_rate = 0.027
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
results = run_simulation2(system)
plot_results(census, un, results, 'Proportional model')
savefig('figs/chap06-fig01.pdf')
###Output
Saving figure to file figs/chap06-fig01.pdf
###Markdown
The model fits the data pretty well for the first 20 years, but not so well after that. Factoring out the update function `run_simulation1` and `run_simulation2` are nearly identical except for the body of the loop. So we can factor that part out into a function.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
###Output
_____no_output_____
###Markdown
The name `update_func` refers to a function object.
###Code
update_func1
###Output
_____no_output_____
###Markdown
Which we can confirm by checking its type.
###Code
type(update_func1)
###Output
_____no_output_____
###Markdown
`run_simulation` takes the update function as a parameter and calls it just like any other function.
###Code
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = update_func(results[t], t, system)
return results
###Output
_____no_output_____
###Markdown
Here's how we use it.
###Code
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = census[t_0]
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
birth_rate=0.027,
death_rate=0.01)
results = run_simulation(system, update_func1)
plot_results(census, un, results, 'Proportional model, factored')
###Output
_____no_output_____
###Markdown
Remember not to put parentheses after `update_func1`. What happens if you try? **Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again. Combining birth and death Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate.
###Code
def update_func2(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
net_growth = system.alpha * pop
return pop + net_growth
###Output
_____no_output_____
###Markdown
Here's how it works:
###Code
system.alpha = system.birth_rate - system.death_rate
results = run_simulation(system, update_func2)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Exercises **Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice). Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement. Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can.
###Code
# Solution goes here
def update_func3(pop, t, system):
    """Compute the population next year, using alpha1 before 1980
    and alpha2 afterward.
    pop: current population
    t: current year
    system: System object with alpha1 and alpha2
    returns: population next year
    """
    if t < 1980:
        return pop + pop * system.alpha1
    else:
        return pop + pop * system.alpha2
system = System(t_0=int(t_0), t_end=int(t_end), p_0=p_0,
                alpha1=0.019, alpha2=0.015)
# test the update function directly before running the simulation
update_func3(100, 1980, system)
# `run_simulation` already loops over the years, so we can reuse it
results = run_simulation(system, update_func3)
plot_results(census, un, results, 'Proportional model, two growth rates')
###Output
_____no_output_____
###Markdown
Odds and Addends Think Bayes, Second Edition Copyright 2020 Allen B. Downey License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
###Code
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py and create directories
import os
if not os.path.exists('utils.py'):
!wget https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py
from utils import set_pyplot_params
set_pyplot_params()
###Output
_____no_output_____
###Markdown
This chapter presents a new way to represent a degree of certainty, **odds**, and a new form of Bayes's Theorem, called **Bayes's Rule**. Bayes's Rule is convenient if you want to do a Bayesian update on paper or in your head. It also sheds light on the important idea of **evidence** and how we can quantify the strength of evidence. The second part of the chapter is about "addends", that is, quantities being added, and how we can compute their distributions. We'll define functions that compute the distribution of sums, differences, products, and other operations. Then we'll use those distributions as part of a Bayesian update. Odds One way to represent a probability is with a number between 0 and 1, but that's not the only way. If you have ever bet on a football game or a horse race, you have probably encountered another representation of probability, called **odds**. You might have heard expressions like "the odds are three to one", but you might not know what that means. The **odds in favor** of an event are the ratio of the probability it will occur to the probability that it will not. The following function does this calculation.
###Code
def odds(p):
return p / (1-p)
###Output
_____no_output_____
###Markdown
For example, if my team has a 75% chance of winning, the odds in their favor are three to one, because the chance of winning is three times the chance of losing.
###Code
odds(0.75)
###Output
_____no_output_____
###Markdown
You can write odds in decimal form, but it is also common to write them as a ratio of integers. So "three to one" is sometimes written $3:1$. When probabilities are low, it is more common to report the **odds against** rather than the odds in favor. For example, if my horse has a 10% chance of winning, the odds in favor are $1:9$.
###Code
odds(0.1)
###Output
_____no_output_____
###Markdown
But in that case it would be more common to say that the odds against are $9:1$.
###Code
odds(0.9)
###Output
_____no_output_____
###Markdown
Given the odds in favor, in decimal form, you can convert to probability like this:
###Code
def prob(o):
return o / (o+1)
###Output
_____no_output_____
###Markdown
For example, if the odds are $3/2$, the corresponding probability is $3/5$:
###Code
prob(3/2)
###Output
_____no_output_____
###Markdown
Or if you represent odds with a numerator and denominator, you can convert to probability like this:
###Code
def prob2(yes, no):
return yes / (yes + no)
prob2(3, 2)
###Output
_____no_output_____
###Markdown
Probabilities and odds are different representations of the same information; given either one, you can compute the other. But some computations are easier when we work with odds, as we'll see in the next section, and some computations are even easier with log odds, which we'll see later. Bayes's Rule So far we have worked with Bayes's theorem in the "probability form": $$P(H|D) = \frac{P(H)~P(D|H)}{P(D)}$$ Writing $\mathrm{odds}(A)$ for odds in favor of $A$, we can express Bayes's Theorem in "odds form": $$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$ This is Bayes's Rule, which says that the posterior odds are the prior odds times the likelihood ratio. Bayes's Rule is convenient for computing a Bayesian update on paper or in your head. For example, let's go back to the cookie problem:> Suppose there are two bowls of cookies. Bowl 1 contains 30 vanilla cookies and 10 chocolate cookies. Bowl 2 contains 20 of each. Now suppose you choose one of the bowls at random and, without looking, select a cookie at random. The cookie is vanilla. What is the probability that it came from Bowl 1? The prior probability is 50%, so the prior odds are 1. The likelihood ratio is $\frac{3}{4} / \frac{1}{2}$, or $3/2$. So the posterior odds are $3/2$, which corresponds to probability $3/5$.
###Code
prior_odds = 1
likelihood_ratio = (3/4) / (1/2)
post_odds = prior_odds * likelihood_ratio
post_odds
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
If we draw another cookie and it's chocolate, we can do another update:
###Code
likelihood_ratio = (1/4) / (1/2)
post_odds *= likelihood_ratio
post_odds
###Output
_____no_output_____
###Markdown
And convert back to probability.
###Code
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
Oliver's Blood I’ll use Bayes’s Rule to solve another problem from MacKay’s [*Information Theory, Inference, and Learning Algorithms*](https://www.inference.org.uk/mackay/itila/):> Two people have left traces of their own blood at the scene of a crime. A suspect, Oliver, is tested and found to have type ‘O’ blood. The blood groups of the two traces are found to be of type ‘O’ (a common type in the local population, having frequency 60%) and of type ‘AB’ (a rare type, with frequency 1%). Do these data \[the traces found at the scene\] give evidence in favor of the proposition that Oliver was one of the people \[who left blood at the scene\]? To answer this question, we need to think about what it means for data to give evidence in favor of (or against) a hypothesis. Intuitively, we might say that data favor a hypothesis if the hypothesis is more likely in light of the data than it was before. In the cookie problem, the prior odds are 1, which corresponds to probability 50%. The posterior odds are $3/2$, or probability 60%. So the vanilla cookie is evidence in favor of Bowl 1. Bayes's Rule provides a way to make this intuition more precise. Again $$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$ Dividing through by $\mathrm{odds}(A)$, we get: $$\frac{\mathrm{odds}(A|D)}{\mathrm{odds}(A)} = \frac{P(D|A)}{P(D|B)}$$ The term on the left is the ratio of the posterior and prior odds. The term on the right is the likelihood ratio, also called the **Bayes factor**. If the Bayes factor is greater than 1, that means that the data were more likely under $A$ than under $B$. And that means that the odds are greater, in light of the data, than they were before. If the Bayes factor is less than 1, that means the data were less likely under $A$ than under $B$, so the odds in favor of $A$ go down. Finally, if the Bayes factor is exactly 1, the data are equally likely under either hypothesis, so the odds do not change. Let's apply that to the problem at hand. If Oliver is one of the people who left blood at the crime scene, he accounts for the ‘O’ sample; in that case, the probability of the data is the probability that a random member of the population has type ‘AB’ blood, which is 1%. If Oliver did not leave blood at the scene, we have two samples to account for. If we choose two random people from the population, what is the chance of finding one with type ‘O’ and one with type ‘AB’? Well, there are two ways it might happen: * The first person might have ‘O’ and the second ‘AB’, * Or the first person might have ‘AB’ and the second ‘O’. The probability of either combination is $(0.6) (0.01)$, which is 0.6%, so the total probability is twice that, or 1.2%. So the data are a little more likely if Oliver is *not* one of the people who left blood at the scene. We can use these probabilities to compute the likelihood ratio:
###Code
like1 = 0.01
like2 = 2 * 0.6 * 0.01
likelihood_ratio = like1 / like2
likelihood_ratio
###Output
_____no_output_____
###Markdown
Since the likelihood ratio is less than 1, the blood tests are evidence *against* the hypothesis that Oliver left blood at the scene. But it is weak evidence. For example, if the prior odds were 1 (that is, 50% probability), the posterior odds would be 0.83, which corresponds to a probability of 45%:
###Code
post_odds = 1 * like1 / like2
prob(post_odds)
###Output
_____no_output_____
###Markdown
So this evidence doesn't "move the needle" very much. This example is a little contrived, but it demonstrates the counterintuitive result that data *consistent* with a hypothesis are not necessarily *in favor of* the hypothesis. If this result still bothers you, this way of thinking might help: the data consist of a common event, type ‘O’ blood, and a rare event, type ‘AB’ blood. If Oliver accounts for the common event, that leaves the rare event unexplained. If Oliver doesn’t account for the ‘O’ blood, we have two chances to find someone in the population with ‘AB’ blood. And that factor of two makes the difference. **Exercise:** Suppose that based on other evidence, your prior belief in Oliver's guilt is 90%. How much would the blood evidence in this section change your beliefs? What if you initially thought there was only a 10% chance of his guilt?
###Code
# Solution goes here
# Solution goes here
###Output
_____no_output_____
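###Markdown
One way to work this exercise (a sketch, not the only approach): convert each prior probability to odds with the `odds` function defined earlier in the chapter, multiply by the Bayes factor computed above, and convert back with `prob`.
###Code
for prior_prob in [0.9, 0.1]:
    post_prob = prob(odds(prior_prob) * likelihood_ratio)
    print(prior_prob, post_prob)
###Output
_____no_output_____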
###Markdown
AddendsThe second half of this chapter is about distributions of sums and results of other operations. We'll start with a forward problem, where we are given the inputs and compute the distribution of the output. Then we'll work on inverse problems, where we are given the outputs and we compute the distribution of the inputs. As a first example, suppose you roll two dice and add them up. What is the distribution of the sum? I’ll use the following function to create a `Pmf` that represents the possible outcomes of a die:
###Code
import numpy as np
from empiricaldist import Pmf
def make_die(sides):
outcomes = np.arange(1, sides+1)
die = Pmf(1/sides, outcomes)
return die
###Output
_____no_output_____
###Markdown
On a six-sided die, the outcomes are 1 through 6, all equally likely.
###Code
die = make_die(6)
from utils import decorate
die.bar(alpha=0.4)
decorate(xlabel='Outcome',
ylabel='PMF')
###Output
_____no_output_____
###Markdown
If we roll two dice and add them up, there are 11 possible outcomes, 2 through 12, but they are not equally likely. To compute the distribution of the sum, we have to enumerate the possible outcomes. And that's how this function works:
###Code
def add_dist(pmf1, pmf2):
"""Compute the distribution of a sum."""
res = Pmf()
for q1, p1 in pmf1.items():
for q2, p2 in pmf2.items():
q = q1 + q2
p = p1 * p2
res[q] = res(q) + p
return res
###Output
_____no_output_____
###Markdown
The parameters are `Pmf` objects representing distributions. The loops iterate through the quantities and probabilities in the `Pmf` objects. Each time through the loop `q` gets the sum of a pair of quantities, and `p` gets the probability of the pair. Because the same sum might appear more than once, we have to add up the total probability for each sum. Notice a subtle element of this line:``` res[q] = res(q) + p```I use parentheses on the right side of the assignment, which returns 0 if `q` does not appear yet in `res`. I use brackets on the left side of the assignment to create or update an element in `res`; using parentheses on the left side would not work. `Pmf` provides `add_dist`, which does the same thing. You can call it as a method, like this:
###Code
twice = die.add_dist(die)
###Output
_____no_output_____
###Markdown
Or as a function, like this:
###Code
twice = Pmf.add_dist(die, die)
###Output
_____no_output_____
###Markdown
And here's what the result looks like:
###Code
from utils import decorate
def decorate_dice(title=''):
decorate(xlabel='Outcome',
ylabel='PMF',
title=title)
twice = add_dist(die, die)
twice.bar(color='C1', alpha=0.5)
decorate_dice()
###Output
_____no_output_____
###Markdown
If we have a sequence of `Pmf` objects that represent dice, we can compute the distribution of the sum like this:
###Code
def add_dist_seq(seq):
"""Compute Pmf of the sum of values from seq."""
total = seq[0]
for other in seq[1:]:
total = total.add_dist(other)
return total
###Output
_____no_output_____
###Markdown
As an example, we can make a list of three dice like this:
###Code
dice = [die] * 3
###Output
_____no_output_____
###Markdown
And we can compute the distribution of their sum like this.
###Code
thrice = add_dist_seq(dice)
###Output
_____no_output_____
###Markdown
The following figure shows what these three distributions look like:- The distribution of a single die is uniform from 1 to 6.- The sum of two dice has a triangle distribution between 2 and 12.- The sum of three dice has a bell-shaped distribution between 3 and 18.
###Code
import matplotlib.pyplot as plt
die.plot(label='once')
twice.plot(label='twice', style='--')
thrice.plot(label='thrice', style=':')
plt.xticks([0,3,6,9,12,15,18])
decorate_dice(title='Distributions of sums')
###Output
_____no_output_____
###Markdown
As an aside, this example demonstrates the Central Limit Theorem, which says that the distribution of a sum converges on a bell-shaped normal distribution, at least under some conditions. Gluten SensitivityIn 2015 I read a paper that tested whether people diagnosed with gluten sensitivity (but not celiac disease) were able to distinguish gluten flour from non-gluten flour in a blind challenge ([you can read the paper here](https://onlinelibrary.wiley.com/doi/full/10.1111/apt.13372)). Out of 35 subjects, 12 correctly identified the gluten flour based on resumption of symptoms while they were eating it. Another 17 wrongly identified the gluten-free flour based on their symptoms, and 6 were unable to distinguish. The authors conclude, "Double-blind gluten challenge induces symptom recurrence in just one-third of patients." This conclusion seems odd to me, because if none of the patients were sensitive to gluten, we would expect some of them to identify the gluten flour by chance. So here's the question: based on this data, how many of the subjects are sensitive to gluten and how many are guessing? We can use Bayes's Theorem to answer this question, but first we have to make some modeling decisions. I'll assume:- People who are sensitive to gluten have a 95% chance of correctly identifying gluten flour under the challenge conditions, and- People who are not sensitive have a 40% chance of identifying the gluten flour by chance (and a 60% chance of either choosing the other flour or failing to distinguish). These particular values are arbitrary, but the results are not sensitive to these choices. I will solve this problem in two steps. First, assuming that we know how many subjects are sensitive, I will compute the distribution of the data. Then, using the likelihood of the data, I will compute the posterior distribution of the number of sensitive patients. The first is the **forward problem**; the second is the **inverse problem**. The Forward ProblemSuppose we know that 10 of the 35 subjects are sensitive to gluten. That means that 25 are not:
###Code
n = 35
num_sensitive = 10
num_insensitive = n - num_sensitive
###Output
_____no_output_____
###Markdown
Each sensitive subject has a 95% chance of identifying the gluten flour, so the number of correct identifications follows a binomial distribution. I'll use `make_binomial`, which we defined in a previous chapter, to make a `Pmf` that represents the binomial distribution.
###Code
from utils import make_binomial
dist_sensitive = make_binomial(num_sensitive, 0.95)
dist_insensitive = make_binomial(num_insensitive, 0.40)
###Output
_____no_output_____
###Markdown
The results are the distributions for the number of correct identifications in each group. Now we can use `add_dist` to compute the distribution of the total number of correct identifications:
###Code
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
###Output
_____no_output_____
###Markdown
Here are the results:
###Code
dist_sensitive.plot(label='sensitive', style=':')
dist_insensitive.plot(label='insensitive', style='--')
dist_total.plot(label='total')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
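###Markdown
As a quick numeric check (a sketch that relies on the `Pmf.mean` method from empiricaldist), the mean of the total matches the back-of-the-envelope estimate $10 (0.95) + 25 (0.4) = 19.5$:
###Code
dist_total.mean()
###Output
_____no_output_____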
###Markdown
We expect most of the sensitive subjects to identify the gluten flour correctly. Of the 25 insensitive subjects, we expect about 10 to identify the gluten flour by chance. So we expect about 20 correct identifications in total. This is the answer to the forward problem: given the number of sensitive subjects, we can compute the distribution of the data. The Inverse ProblemNow let's solve the inverse problem: given the data, we'll compute the posterior distribution of the number of sensitive subjects. Here's how. I'll loop through the possible values of `num_sensitive` and compute the distribution of the data for each:
###Code
import pandas as pd
table = pd.DataFrame()
for num_sensitive in range(0, n+1):
num_insensitive = n - num_sensitive
dist_sensitive = make_binomial(num_sensitive, 0.95)
dist_insensitive = make_binomial(num_insensitive, 0.4)
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
table[num_sensitive] = dist_total
###Output
_____no_output_____
###Markdown
The loop enumerates the possible values of `num_sensitive`. For each value, it computes the distribution of the total number of correct identifications, and stores the result as a column in a Pandas `DataFrame`.
###Code
table.head(3)
###Output
_____no_output_____
###Markdown
The following figure shows selected columns from the `DataFrame`, corresponding to different hypothetical values of `num_sensitive`:
###Code
table[0].plot(label='num_sensitive = 0')
table[10].plot(label='num_sensitive = 10')
table[20].plot(label='num_sensitive = 20', style='--')
table[30].plot(label='num_sensitive = 30', style=':')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
###Markdown
Now we can use this table to compute the likelihood of the data:
###Code
likelihood1 = table.loc[12]
###Output
_____no_output_____
###Markdown
`loc` selects a row from the `DataFrame`. The row with index 12 contains the probability of 12 correct identifications for each hypothetical value of `num_sensitive`. And that's exactly the likelihood we need to do a Bayesian update. I'll use a uniform prior, which implies that I would be equally surprised by any value of `num_sensitive`:
###Code
hypos = np.arange(n+1)
prior = Pmf(1, hypos)
###Output
_____no_output_____
###Markdown
And here's the update:
###Code
posterior1 = prior * likelihood1
posterior1.normalize()
###Output
_____no_output_____
###Markdown
For comparison, I also compute the posterior for another possible outcome, 20 correct identifications.
###Code
likelihood2 = table.loc[20]
posterior2 = prior * likelihood2
posterior2.normalize()
###Output
_____no_output_____
###Markdown
The following figure shows posterior distributions of `num_sensitive` based on the actual data, 12 correct identifications, and the other possible outcome, 20 correct identifications.
###Code
posterior1.plot(label='posterior with 12 correct', color='C4')
posterior2.plot(label='posterior with 20 correct', color='C1')
decorate(xlabel='Number of sensitive subjects',
ylabel='PMF',
title='Posterior distributions')
###Output
_____no_output_____
###Markdown
With 12 correct identifications, the most likely conclusion is that none of the subjects are sensitive to gluten. If there had been 20 correct identifications, the most likely conclusion would be that 11-12 of the subjects were sensitive.
###Code
posterior1.max_prob()
posterior2.max_prob()
###Output
_____no_output_____
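###Markdown
Besides the most likely value, we can summarize each posterior with its mean (again a small sketch using empiricaldist's `Pmf.mean`):
###Code
posterior1.mean(), posterior2.mean()
###Output
_____no_output_____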
###Markdown
SummaryThis chapter presents two topics that are almost unrelated except that they make the title of the chapter catchy. The first part of the chapter is about Bayes's Rule, evidence, and how we can quantify the strength of evidence using a likelihood ratio or Bayes factor. The second part is about `add_dist`, which computes the distribution of a sum. We can use this function to solve forward and inverse problems; that is, given the parameters of a system, we can compute the distribution of the data or, given the data, we can compute the distribution of the parameters. In the next chapter, we'll compute distributions for minimums and maximums, and use them to solve more Bayesian problems. But first you might want to work on these exercises. Exercises **Exercise:** Let's use Bayes's Rule to solve the Elvis problem from an earlier chapter:> Elvis Presley had a twin brother who died at birth. What is the probability that Elvis was an identical twin? In 1935, about 2/3 of twins were fraternal and 1/3 were identical. The question contains two pieces of information we can use to update this prior.* First, Elvis's twin was also male, which is more likely if they were identical twins, with a likelihood ratio of 2.* Also, Elvis's twin died at birth, which is more likely if they were identical twins, with a likelihood ratio of 1.25. If you are curious about where those numbers come from, I wrote [a blog post about it](https://www.allendowney.com/blog/2020/01/28/the-elvis-problem-revisited).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
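###Markdown
Here is one possible solution sketch: the prior odds that a twin is identical are $(1/3) / (2/3) = 1/2$, and the two likelihood ratios multiply.
###Code
prior_odds = (1/3) / (2/3)
post_odds = prior_odds * 2 * 1.25   # male twin, died at birth
prob(post_odds)
###Output
_____no_output_____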
###Markdown
**Exercise:** The following is an [interview question that appeared on glassdoor.com](https://www.glassdoor.com/Interview/You-re-about-to-get-on-a-plane-to-Seattle-You-want-to-know-if-you-should-bring-an-umbrella-You-call-3-random-friends-of-y-QTN_519262.htm), attributed to Facebook:> You're about to get on a plane to Seattle. You want to know if you should bring an umbrella. You call 3 random friends of yours who live there and ask each independently if it's raining. Each of your friends has a 2/3 chance of telling you the truth and a 1/3 chance of messing with you by lying. All 3 friends tell you that "Yes" it is raining. What is the probability that it's actually raining in Seattle? Use Bayes's Rule to solve this problem. As a prior you can assume that it rains in Seattle about 10% of the time. This question causes some confusion about the differences between Bayesian and frequentist interpretations of probability; if you are curious about this point, [I wrote a blog article about it](http://allendowney.blogspot.com/2016/09/bayess-theorem-is-not-optional.html).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
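###Markdown
A possible solution sketch: the prior odds of rain are $0.1 / 0.9$, and each independent "yes" contributes a Bayes factor of $(2/3) / (1/3) = 2$.
###Code
prior_odds = odds(0.1)
post_odds = prior_odds * 2**3   # three independent "yes" answers
prob(post_odds)
###Output
_____no_output_____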
###Markdown
**Exercise:** [According to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/health_effects/effects_cig_smoking), people who smoke are about 25 times more likely to develop lung cancer than nonsmokers. [Also according to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/adult_data/cig_smoking/index.htm), about 14% of adults in the U.S. are smokers. If you learn that someone has lung cancer, what is the probability they are a smoker?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
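###Markdown
A sketch of the update: the prior odds of being a smoker are $0.14 / 0.86$, and the likelihood ratio given lung cancer is 25.
###Code
post_odds = odds(0.14) * 25
prob(post_odds)
###Output
_____no_output_____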
###Markdown
**Exercise:** In *Dungeons & Dragons*, the amount of damage a goblin can withstand is the sum of two six-sided dice. The amount of damage you inflict with a short sword is determined by rolling one six-sided die. A goblin is defeated if the total damage you inflict is greater than or equal to the amount it can withstand. Suppose you are fighting a goblin and you have already inflicted 3 points of damage. What is your probability of defeating the goblin with your next successful attack? Hint: You can use `Pmf.add_dist` to add a constant amount, like 3, to a `Pmf` and `Pmf.sub_dist` to compute the distribution of remaining points.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
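###Markdown
Here is one way to set it up (a sketch reflecting my reading of the problem: since the goblin is still standing after 3 points of damage, I condition on the remaining points being positive):
###Code
hp = Pmf.add_dist(die, die)    # the goblin's hit points: sum of two six-sided dice
remaining = hp.sub_dist(3)     # points left after the 3 damage already inflicted
# condition on the goblin still standing (remaining > 0)
qs = [q for q in remaining.qs if q > 0]
ps = [remaining[q] for q in qs]
alive = Pmf(ps, qs)
alive.normalize()
# probability that one six-sided roll meets or exceeds the remaining points
p_defeat = sum(p * sum(p2 for q2, p2 in die.items() if q2 >= q)
               for q, p in alive.items())
p_defeat
###Output
_____no_output_____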
###Markdown
**Exercise:** Suppose I have a box with a 6-sided die, an 8-sided die, and a 12-sided die. I choose one of the dice at random, roll it twice, multiply the outcomes, and report that the product is 12. What is the probability that I chose the 8-sided die? Hint: `Pmf` provides a function called `mul_dist` that takes two `Pmf` objects and returns a `Pmf` that represents the distribution of the product.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
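###Markdown
A solution sketch using `make_die` from earlier and the `mul_dist` hint; calling the product `Pmf` with parentheses returns 0 when a product of 12 is impossible:
###Code
hypos = [6, 8, 12]
likelihood = []
for sides in hypos:
    d = make_die(sides)
    product = Pmf.mul_dist(d, d)    # distribution of the product of two rolls
    likelihood.append(product(12))  # probability the product is 12
posterior = Pmf(likelihood, hypos)
posterior.normalize()
posterior[8]
###Output
_____no_output_____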
###Markdown
**Exercise:** *Betrayal at House on the Hill* is a strategy game in which characters with different attributes explore a haunted house. Depending on their attributes, the characters roll different numbers of dice. For example, if attempting a task that depends on knowledge, Professor Longfellow rolls 5 dice, Madame Zostra rolls 4, and Ox Bellows rolls 3. Each die yields 0, 1, or 2 with equal probability. If a randomly chosen character attempts a task three times and rolls a total of 3 on the first attempt, 4 on the second, and 5 on the third, which character do you think it was?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
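###Markdown
A solution sketch: build the single-die `Pmf` over {0, 1, 2}, use `add_dist_seq` for each character's total, and multiply the likelihoods of the three observed totals.
###Code
die3 = Pmf(1/3, [0, 1, 2])                  # each die yields 0, 1, or 2
hypos = ['Longfellow', 'Zostra', 'Bellows']
n_dice = [5, 4, 3]
likelihood = []
for n in n_dice:
    dist = add_dist_seq([die3] * n)         # distribution of one attempt's total
    likelihood.append(dist(3) * dist(4) * dist(5))
posterior = Pmf(likelihood, hypos)
posterior.normalize()
posterior
###Output
_____no_output_____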
###Markdown
**Exercise:** There are 538 members of the United States Congress. Suppose we audit their investment portfolios and find that 312 of them out-perform the market. Let's assume that an honest member of Congress has only a 50% chance of out-performing the market, but a dishonest member who trades on inside information has a 90% chance. How many members of Congress are honest?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
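###Markdown
This has the same shape as the gluten problem, so one approach (a sketch; it may take a few seconds to run) is to reuse the table-of-binomials idea:
###Code
n = 538
data = 312
table = pd.DataFrame()
for num_honest in range(n + 1):
    num_dishonest = n - num_honest
    dist_honest = make_binomial(num_honest, 0.5)
    dist_dishonest = make_binomial(num_dishonest, 0.9)
    table[num_honest] = Pmf.add_dist(dist_honest, dist_dishonest)
likelihood = table.loc[data]
prior = Pmf(1, np.arange(n + 1))
posterior = prior * likelihood
posterior.normalize()
posterior.max_prob()
###Output
_____no_output_____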
###Markdown
Modeling and Simulation in PythonChapter 6Copyright 2017 Allen DowneyLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
p_0 = get_first_value(census)
p_end = get_last_value(census)
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
###Output
_____no_output_____
###Markdown
System objects We can rewrite the code from the previous chapter using system objects.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
annual_growth=annual_growth)
###Output
_____no_output_____
###Markdown
And we can encapsulate the code that runs the model in a function.
###Code
def run_simulation1(system):
"""Runs the constant growth model.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = results[t] + system.annual_growth
return results
###Output
_____no_output_____
###Markdown
We can also encapsulate the code that plots the results.
###Code
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
###Output
_____no_output_____
###Markdown
Here's how we run it.
###Code
results = run_simulation1(system)
plot_results(census, un, results, 'Constant growth model')
###Output
_____no_output_____
###Markdown
Proportional growth Here's a more realistic model where the number of births and deaths is proportional to the current population.
###Code
def run_simulation2(system):
"""Run a model with proportional birth and death.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
births = system.birth_rate * results[t]
deaths = system.death_rate * results[t]
results[t+1] = results[t] + births - deaths
return results
###Output
_____no_output_____
###Markdown
I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data.
###Code
system.death_rate = 0.01
system.birth_rate = 0.027
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
results = run_simulation2(system)
plot_results(census, un, results, 'Proportional model')
savefig('figs/chap06-fig01.pdf')
###Output
Saving figure to file figs/chap06-fig01.pdf
###Markdown
The model fits the data pretty well for the first 20 years, but not so well after that. Factoring out the update function `run_simulation1` and `run_simulation2` are nearly identical except for the body of the loop. So we can factor that part out into a function.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
print(t, pop)
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
###Output
_____no_output_____
###Markdown
The name `update_func` refers to a function object.
###Code
update_func1
###Output
_____no_output_____
###Markdown
Which we can confirm by checking its type.
###Code
type(update_func1)
###Output
_____no_output_____
###Markdown
`run_simulation` takes the update function as a parameter and calls it just like any other function.
###Code
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = update_func(results[t], t, system)
return results
###Output
_____no_output_____
###Markdown
Here's how we use it.
###Code
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = census[t_0]
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
birth_rate=0.027,
death_rate=0.01)
results = run_simulation(system, update_func1)
plot_results(census, un, results, 'Proportional model, factored')
###Output
1950 2.557628654
1951 2.601108341118
1952 2.645327182917006
1953 2.6902977450265952
1954 2.7360328066920476
1955 2.7825453644058125
1956 2.829848635600711
1957 2.8779560624059233
1958 2.926881315466824
1959 2.9766382978297603
1960 3.0272411488928666
1961 3.078704248424045
1962 3.131042220647254
1963 3.184269938398258
1964 3.2384025273510284
1965 3.293455370315996
1966 3.349444111611368
1967 3.406384661508761
1968 3.46429320075441
1969 3.523186185167235
1970 3.583080350315078
1971 3.6439927162704344
1972 3.7059405924470314
1973 3.768941582518631
1974 3.833013589421448
1975 3.8981748204416125
1976 3.9644437923891203
1977 4.031839336859735
1978 4.100380605586351
1979 4.170087075881319
1980 4.240978556171301
1981 4.313075191626214
1982 4.3863974698838595
1983 4.460966226871885
1984 4.536802652728707
1985 4.613928297825096
1986 4.692365078888122
1987 4.77213528522922
1988 4.8532615850781164
1989 4.935767032024445
1990 5.0196750715688605
1991 5.1050095477855315
1992 5.191794710097886
1993 5.28005522016955
1994 5.369816158912433
1995 5.461103033613944
1996 5.553941785185382
1997 5.648358795533533
1998 5.744380895057604
1999 5.8420353702735826
2000 5.941349971568234
2001 6.042352921084894
2002 6.145072920743337
2003 6.249539160395973
2004 6.355781326122704
2005 6.46382960866679
2006 6.573714712014126
2007 6.685467862118366
2008 6.799120815774378
2009 6.9147058696425425
2010 7.032255869426465
2011 7.151804219206714
2012 7.273384890933229
2013 7.397032434079093
2014 7.522781985458438
2015 7.650669279211232
###Markdown
Remember not to put parentheses after `update_func1`. What happens if you try? **Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again. Combining birth and death Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate.
###Code
def update_func2(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
net_growth = system.alpha * pop
return pop + net_growth
###Output
_____no_output_____
###Markdown
Here's how it works:
###Code
system.alpha = system.birth_rate - system.death_rate
results = run_simulation(system, update_func2)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Exercises**Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice). Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement. Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
alpha1 = 0.019,
alpha2 = 0.014)
def update_func3(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
if t < 1980:
net_growth = system.alpha1 * pop
else:
net_growth = system.alpha2 * pop
return pop + net_growth
results = run_simulation(system, update_func3)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Odds and Addends Think Bayes, Second EditionCopyright 2020 Allen B. DowneyLicense: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
###Code
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py and create directories
import os
if not os.path.exists('utils.py'):
!wget https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py
from utils import set_pyplot_params
set_pyplot_params()
###Output
_____no_output_____
###Markdown
This chapter presents a new way to represent a degree of certainty, **odds**, and a new form of Bayes's Theorem, called **Bayes's Rule**. Bayes's Rule is convenient if you want to do a Bayesian update on paper or in your head. It also sheds light on the important idea of **evidence** and how we can quantify the strength of evidence. The second part of the chapter is about "addends", that is, quantities being added, and how we can compute their distributions. We'll define functions that compute the distribution of sums, differences, products, and other operations. Then we'll use those distributions as part of a Bayesian update. OddsOne way to represent a probability is with a number between 0 and 1, but that's not the only way. If you have ever bet on a football game or a horse race, you have probably encountered another representation of probability, called **odds**. You might have heard expressions like "the odds are three to one", but you might not know what that means. The **odds in favor** of an event are the ratio of the probability it will occur to the probability that it will not. The following function does this calculation.
###Code
def odds(p):
return p / (1-p)
###Output
_____no_output_____
###Markdown
For example, if my team has a 75% chance of winning, the odds in their favor are three to one, because the chance of winning is three times the chance of losing.
###Code
odds(0.75)
###Output
_____no_output_____
###Markdown
You can write odds in decimal form, but it is also common to write them as a ratio of integers. So "three to one" is sometimes written $3:1$. When probabilities are low, it is more common to report the **odds against** rather than the odds in favor. For example, if my horse has a 10% chance of winning, the odds in favor are $1:9$.
###Code
odds(0.1)
###Output
_____no_output_____
###Markdown
But in that case it would be more common to say that the odds against are $9:1$.
###Code
odds(0.9)
###Output
_____no_output_____
###Markdown
Given the odds in favor, in decimal form, you can convert to probability like this:
###Code
def prob(o):
return o / (o+1)
###Output
_____no_output_____
###Markdown
For example, if the odds are $3/2$, the corresponding probability is $3/5$:
###Code
prob(3/2)
###Output
_____no_output_____
###Markdown
Or if you represent odds with a numerator and denominator, you can convert to probability like this:
###Code
def prob2(yes, no):
return yes / (yes + no)
prob2(3, 2)
###Output
_____no_output_____
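###Markdown
As a quick consistency check (just a sketch), odds of $3:1$ give the same probability either way:
###Code
prob(3), prob2(3, 1)
###Output
_____no_output_____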
###Markdown
Probabilities and odds are different representations of the same information; given either one, you can compute the other. But some computations are easier when we work with odds, as we'll see in the next section, and some computations are even easier with log odds, which we'll see later. Bayes's RuleSo far we have worked with Bayes's theorem in the "probability form": $$P(H|D) = \frac{P(H)~P(D|H)}{P(D)}$$Writing $\mathrm{odds}(A)$ for odds in favor of $A$, we can express Bayes's Theorem in "odds form":$$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$This is Bayes's Rule, which says that the posterior odds are the prior odds times the likelihood ratio. Bayes's Rule is convenient for computing a Bayesian update on paper or in your head. For example, let's go back to the cookie problem:> Suppose there are two bowls of cookies. Bowl 1 contains 30 vanilla cookies and 10 chocolate cookies. Bowl 2 contains 20 of each. Now suppose you choose one of the bowls at random and, without looking, select a cookie at random. The cookie is vanilla. What is the probability that it came from Bowl 1? The prior probability is 50%, so the prior odds are 1. The likelihood ratio is $\frac{3}{4} / \frac{1}{2}$, or $3/2$. So the posterior odds are $3/2$, which corresponds to probability $3/5$.
###Code
prior_odds = 1
likelihood_ratio = (3/4) / (1/2)
post_odds = prior_odds * likelihood_ratio
post_odds
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
If we draw another cookie and it's chocolate, we can do another update:
###Code
likelihood_ratio = (1/4) / (1/2)
post_odds *= likelihood_ratio
post_odds
###Output
_____no_output_____
###Markdown
And convert back to probability.
###Code
post_prob = prob(post_odds)
post_prob
###Output
_____no_output_____
###Markdown
Oliver's BloodI’ll use Bayes’s Rule to solve another problem from MacKay’s [*Information Theory, Inference, and Learning Algorithms*](https://www.inference.org.uk/mackay/itila/):> Two people have left traces of their own blood at the scene of a crime. A suspect, Oliver, is tested and found to have type ‘O’ blood. The blood groups of the two traces are found to be of type ‘O’ (a common type in the local population, having frequency 60%) and of type ‘AB’ (a rare type, with frequency 1%). Do these data \[the traces found at the scene\] give evidence in favor of the proposition that Oliver was one of the people \[who left blood at the scene\]? To answer this question, we need to think about what it means for data to give evidence in favor of (or against) a hypothesis. Intuitively, we might say that data favor a hypothesis if the hypothesis is more likely in light of the data than it was before. In the cookie problem, the prior odds are 1, which corresponds to probability 50%. The posterior odds are $3/2$, or probability 60%. So the vanilla cookie is evidence in favor of Bowl 1. Bayes's Rule provides a way to make this intuition more precise. Again$$\mathrm{odds}(A|D) = \mathrm{odds}(A)~\frac{P(D|A)}{P(D|B)}$$Dividing through by $\mathrm{odds}(A)$, we get: $$\frac{\mathrm{odds}(A|D)}{\mathrm{odds}(A)} = \frac{P(D|A)}{P(D|B)}$$The term on the left is the ratio of the posterior and prior odds. The term on the right is the likelihood ratio, also called the **Bayes factor**. If the Bayes factor is greater than 1, that means that the data were more likely under $A$ than under $B$. And that means that the odds are greater, in light of the data, than they were before. If the Bayes factor is less than 1, that means the data were less likely under $A$ than under $B$, so the odds in favor of $A$ go down. Finally, if the Bayes factor is exactly 1, the data are equally likely under either hypothesis, so the odds do not change. Let's apply that to the problem at hand. If Oliver is one of the people who left blood at the crime scene, he accounts for the ‘O’ sample; in that case, the probability of the data is the probability that a random member of the population has type ‘AB’ blood, which is 1%. If Oliver did not leave blood at the scene, we have two samples to account for. If we choose two random people from the population, what is the chance of finding one with type ‘O’ and one with type ‘AB’? Well, there are two ways it might happen: * The first person might have ‘O’ and the second ‘AB’, * Or the first person might have ‘AB’ and the second ‘O’. The probability of either combination is $(0.6) (0.01)$, which is 0.6%, so the total probability is twice that, or 1.2%. So the data are a little more likely if Oliver is *not* one of the people who left blood at the scene. We can use these probabilities to compute the likelihood ratio:
###Code
like1 = 0.01
like2 = 2 * 0.6 * 0.01
likelihood_ratio = like1 / like2
likelihood_ratio
###Output
_____no_output_____
###Markdown
Since the likelihood ratio is less than 1, the blood tests are evidence *against* the hypothesis that Oliver left blood at the scene. But it is weak evidence. For example, if the prior odds were 1 (that is, 50% probability), the posterior odds would be 0.83, which corresponds to a probability of 45%:
###Code
post_odds = 1 * like1 / like2
prob(post_odds)
###Output
_____no_output_____
###Markdown
So this evidence doesn't "move the needle" very much. This example is a little contrived, but it demonstrates the counterintuitive result that data *consistent* with a hypothesis are not necessarily *in favor of* the hypothesis. If this result still bothers you, this way of thinking might help: the data consist of a common event, type ‘O’ blood, and a rare event, type ‘AB’ blood. If Oliver accounts for the common event, that leaves the rare event unexplained. If Oliver doesn’t account for the ‘O’ blood, we have two chances to find someone in the population with ‘AB’ blood. And that factor of two makes the difference. **Exercise:** Suppose that based on other evidence, your prior belief in Oliver's guilt is 90%. How much would the blood evidence in this section change your beliefs? What if you initially thought there was only a 10% chance of his guilt?
###Code
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
AddendsThe second half of this chapter is about distributions of sums and results of other operations. We'll start with a forward problem, where we are given the inputs and compute the distribution of the output. Then we'll work on inverse problems, where we are given the outputs and we compute the distribution of the inputs. As a first example, suppose you roll two dice and add them up. What is the distribution of the sum? I’ll use the following function to create a `Pmf` that represents the possible outcomes of a die:
###Code
import numpy as np
from empiricaldist import Pmf
def make_die(sides):
outcomes = np.arange(1, sides+1)
die = Pmf(1/sides, outcomes)
return die
###Output
_____no_output_____
###Markdown
On a six-sided die, the outcomes are 1 through 6, all equally likely.
###Code
die = make_die(6)
from utils import decorate
die.bar(alpha=0.4)
decorate(xlabel='Outcome',
ylabel='PMF')
###Output
_____no_output_____
###Markdown
If we roll two dice and add them up, there are 11 possible outcomes, 2 through 12, but they are not equally likely. To compute the distribution of the sum, we have to enumerate the possible outcomes. And that's how this function works:
###Code
def add_dist(pmf1, pmf2):
"""Compute the distribution of a sum."""
res = Pmf()
for q1, p1 in pmf1.items():
for q2, p2 in pmf2.items():
q = q1 + q2
p = p1 * p2
res[q] = res(q) + p
return res
###Output
_____no_output_____
###Markdown
The parameters are `Pmf` objects representing distributions. The loops iterate through the quantities and probabilities in the `Pmf` objects. Each time through the loop `q` gets the sum of a pair of quantities, and `p` gets the probability of the pair. Because the same sum might appear more than once, we have to add up the total probability for each sum. Notice a subtle element of this line:``` res[q] = res(q) + p```I use parentheses on the right side of the assignment, which returns 0 if `q` does not appear yet in `res`. I use brackets on the left side of the assignment to create or update an element in `res`; using parentheses on the left side would not work. `Pmf` provides `add_dist`, which does the same thing. You can call it as a method, like this:
###Code
twice = die.add_dist(die)
###Output
_____no_output_____
###Markdown
Or as a function, like this:
###Code
twice = Pmf.add_dist(die, die)
###Output
_____no_output_____
###Markdown
And here's what the result looks like:
###Code
from utils import decorate
def decorate_dice(title=''):
decorate(xlabel='Outcome',
ylabel='PMF',
title=title)
twice = add_dist(die, die)
twice.bar(color='C1', alpha=0.5)
decorate_dice()
###Output
_____no_output_____
###Markdown
If we have a sequence of `Pmf` objects that represent dice, we can compute the distribution of the sum like this:
###Code
def add_dist_seq(seq):
"""Compute Pmf of the sum of values from seq."""
total = seq[0]
for other in seq[1:]:
total = total.add_dist(other)
return total
###Output
_____no_output_____
###Markdown
As an example, we can make a list of three dice like this:
###Code
dice = [die] * 3
###Output
_____no_output_____
###Markdown
And we can compute the distribution of their sum like this.
###Code
thrice = add_dist_seq(dice)
###Output
_____no_output_____
###Markdown
The following figure shows what these three distributions look like:- The distribution of a single die is uniform from 1 to 6.- The sum of two dice has a triangle distribution between 2 and 12.- The sum of three dice has a bell-shaped distribution between 3 and 18.
###Code
import matplotlib.pyplot as plt
die.plot(label='once')
twice.plot(label='twice', style='--')
thrice.plot(label='thrice', style=':')
plt.xticks([0,3,6,9,12,15,18])
decorate_dice(title='Distributions of sums')
###Output
_____no_output_____
###Markdown
As an aside, this example demonstrates the Central Limit Theorem, which says that the distribution of a sum converges on a bell-shaped normal distribution, at least under some conditions. Gluten SensitivityIn 2015 I read a paper that tested whether people diagnosed with gluten sensitivity (but not celiac disease) were able to distinguish gluten flour from non-gluten flour in a blind challenge ([you can read the paper here](https://onlinelibrary.wiley.com/doi/full/10.1111/apt.13372)). Out of 35 subjects, 12 correctly identified the gluten flour based on resumption of symptoms while they were eating it. Another 17 wrongly identified the gluten-free flour based on their symptoms, and 6 were unable to distinguish. The authors conclude, "Double-blind gluten challenge induces symptom recurrence in just one-third of patients." This conclusion seems odd to me, because if none of the patients were sensitive to gluten, we would expect some of them to identify the gluten flour by chance. So here's the question: based on this data, how many of the subjects are sensitive to gluten and how many are guessing? We can use Bayes's Theorem to answer this question, but first we have to make some modeling decisions. I'll assume:- People who are sensitive to gluten have a 95% chance of correctly identifying gluten flour under the challenge conditions, and- People who are not sensitive have a 40% chance of identifying the gluten flour by chance (and a 60% chance of either choosing the other flour or failing to distinguish). These particular values are arbitrary, but the results are not sensitive to these choices. I will solve this problem in two steps. First, assuming that we know how many subjects are sensitive, I will compute the distribution of the data. Then, using the likelihood of the data, I will compute the posterior distribution of the number of sensitive patients. The first is the **forward problem**; the second is the **inverse problem**. The Forward ProblemSuppose we know that 10 of the 35 subjects are sensitive to gluten. That means that 25 are not:
###Code
n = 35
num_sensitive = 10
num_insensitive = n - num_sensitive
###Output
_____no_output_____
###Markdown
Each sensitive subject has a 95% chance of identifying the gluten flour, so the number of correct identifications follows a binomial distribution. I'll use `make_binomial`, which we defined in a previous chapter, to make a `Pmf` that represents the binomial distribution.
###Code
from utils import make_binomial
dist_sensitive = make_binomial(num_sensitive, 0.95)
dist_insensitive = make_binomial(num_insensitive, 0.40)
###Output
_____no_output_____
###Markdown
The results are the distributions for the number of correct identifications in each group. Now we can use `add_dist` to compute the distribution of the total number of correct identifications:
###Code
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
###Output
_____no_output_____
###Markdown
Here are the results:
###Code
dist_sensitive.plot(label='sensitive', style=':')
dist_insensitive.plot(label='insensitive', style='--')
dist_total.plot(label='total')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
###Markdown
We expect most of the sensitive subjects to identify the gluten flour correctly. Of the 25 insensitive subjects, we expect about 10 to identify the gluten flour by chance. So we expect about 20 correct identifications in total. This is the answer to the forward problem: given the number of sensitive subjects, we can compute the distribution of the data. The Inverse ProblemNow let's solve the inverse problem: given the data, we'll compute the posterior distribution of the number of sensitive subjects. Here's how. I'll loop through the possible values of `num_sensitive` and compute the distribution of the data for each:
###Code
import pandas as pd
table = pd.DataFrame()
for num_sensitive in range(0, n+1):
num_insensitive = n - num_sensitive
dist_sensitive = make_binomial(num_sensitive, 0.95)
dist_insensitive = make_binomial(num_insensitive, 0.4)
dist_total = Pmf.add_dist(dist_sensitive, dist_insensitive)
table[num_sensitive] = dist_total
###Output
_____no_output_____
###Markdown
The loop enumerates the possible values of `num_sensitive`. For each value, it computes the distribution of the total number of correct identifications, and stores the result as a column in a Pandas `DataFrame`.
###Code
table.head(3)
###Output
_____no_output_____
###Markdown
The following figure shows selected columns from the `DataFrame`, corresponding to different hypothetical values of `num_sensitive`:
###Code
table[0].plot(label='num_sensitive = 0')
table[10].plot(label='num_sensitive = 10')
table[20].plot(label='num_sensitive = 20', style='--')
table[30].plot(label='num_sensitive = 30', style=':')
decorate(xlabel='Number of correct identifications',
ylabel='PMF',
title='Gluten sensitivity')
###Output
_____no_output_____
###Markdown
Now we can use this table to compute the likelihood of the data:
###Code
likelihood1 = table.loc[12]
###Output
_____no_output_____
###Markdown
`loc` selects a row from the `DataFrame`. The row with index 12 contains the probability of 12 correct identifications for each hypothetical value of `num_sensitive`. And that's exactly the likelihood we need to do a Bayesian update. I'll use a uniform prior, which implies that I would be equally surprised by any value of `num_sensitive`:
###Code
hypos = np.arange(n+1)
prior = Pmf(1, hypos)
###Output
_____no_output_____
###Markdown
And here's the update:
###Code
posterior1 = prior * likelihood1
posterior1.normalize()
###Output
_____no_output_____
###Markdown
For comparison, I also compute the posterior for another possible outcome, 20 correct identifications.
###Code
likelihood2 = table.loc[20]
posterior2 = prior * likelihood2
posterior2.normalize()
###Output
_____no_output_____
###Markdown
The following figure shows posterior distributions of `num_sensitive` based on the actual data, 12 correct identifications, and the other possible outcome, 20 correct identifications.
###Code
posterior1.plot(label='posterior with 12 correct', color='C4')
posterior2.plot(label='posterior with 20 correct', color='C1')
decorate(xlabel='Number of sensitive subjects',
ylabel='PMF',
title='Posterior distributions')
###Output
_____no_output_____
###Markdown
With 12 correct identifications, the most likely conclusion is that none of the subjects are sensitive to gluten. If there had been 20 correct identifications, the most likely conclusion would be that 11-12 of the subjects were sensitive.
###Code
posterior1.max_prob()
posterior2.max_prob()
###Output
_____no_output_____
###Markdown
SummaryThis chapter presents two topics that are almost unrelated except that they make the title of the chapter catchy. The first part of the chapter is about Bayes's Rule, evidence, and how we can quantify the strength of evidence using a likelihood ratio or Bayes factor. The second part is about `add_dist`, which computes the distribution of a sum. We can use this function to solve forward and inverse problems; that is, given the parameters of a system, we can compute the distribution of the data or, given the data, we can compute the distribution of the parameters. In the next chapter, we'll compute distributions for minimums and maximums, and use them to solve more Bayesian problems. But first you might want to work on these exercises. Exercises **Exercise:** Let's use Bayes's Rule to solve the Elvis problem from an earlier chapter:> Elvis Presley had a twin brother who died at birth. What is the probability that Elvis was an identical twin? In 1935, about 2/3 of twins were fraternal and 1/3 were identical. The question contains two pieces of information we can use to update this prior.* First, Elvis's twin was also male, which is more likely if they were identical twins, with a likelihood ratio of 2.* Also, Elvis's twin died at birth, which is more likely if they were identical twins, with a likelihood ratio of 1.25. If you are curious about where those numbers come from, I wrote [a blog post about it](https://www.allendowney.com/blog/2020/01/28/the-elvis-problem-revisited).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** The following is an [interview question that appeared on glassdoor.com](https://www.glassdoor.com/Interview/You-re-about-to-get-on-a-plane-to-Seattle-You-want-to-know-if-you-should-bring-an-umbrella-You-call-3-random-friends-of-y-QTN_519262.htm), attributed to Facebook:> You're about to get on a plane to Seattle. You want to know if you should bring an umbrella. You call 3 random friends of yours who live there and ask each independently if it's raining. Each of your friends has a 2/3 chance of telling you the truth and a 1/3 chance of messing with you by lying. All 3 friends tell you that "Yes" it is raining. What is the probability that it's actually raining in Seattle? Use Bayes's Rule to solve this problem. As a prior you can assume that it rains in Seattle about 10% of the time. This question causes some confusion about the differences between Bayesian and frequentist interpretations of probability; if you are curious about this point, [I wrote a blog article about it](http://allendowney.blogspot.com/2016/09/bayess-theorem-is-not-optional.html).
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** [According to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/health_effects/effects_cig_smoking), people who smoke are about 25 times more likely to develop lung cancer than nonsmokers. [Also according to the CDC](https://www.cdc.gov/tobacco/data_statistics/fact_sheets/adult_data/cig_smoking/index.htm), about 14% of adults in the U.S. are smokers. If you learn that someone has lung cancer, what is the probability they are a smoker?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** In *Dungeons & Dragons*, the amount of damage a goblin can withstand is the sum of two six-sided dice. The amount of damage you inflict with a short sword is determined by rolling one six-sided die. A goblin is defeated if the total damage you inflict is greater than or equal to the amount it can withstand. Suppose you are fighting a goblin and you have already inflicted 3 points of damage. What is your probability of defeating the goblin with your next successful attack? Hint: You can use `Pmf.add_dist` to add a constant amount, like 3, to a `Pmf` and `Pmf.sub_dist` to compute the distribution of remaining points.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** Suppose I have a box with a 6-sided die, an 8-sided die, and a 12-sided die. I choose one of the dice at random, roll it twice, multiply the outcomes, and report that the product is 12. What is the probability that I chose the 8-sided die? Hint: `Pmf` provides a function called `mul_dist` that takes two `Pmf` objects and returns a `Pmf` that represents the distribution of the product.
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** *Betrayal at House on the Hill* is a strategy game in which characters with different attributes explore a haunted house. Depending on their attributes, the characters roll different numbers of dice. For example, if attempting a task that depends on knowledge, Professor Longfellow rolls 5 dice, Madame Zostra rolls 4, and Ox Bellows rolls 3. Each die yields 0, 1, or 2 with equal probability. If a randomly chosen character attempts a task three times and rolls a total of 3 on the first attempt, 4 on the second, and 5 on the third, which character do you think it was?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
**Exercise:** There are 538 members of the United States Congress. Suppose we audit their investment portfolios and find that 312 of them out-perform the market. Let's assume that an honest member of Congress has only a 50% chance of out-performing the market, but a dishonest member who trades on inside information has a 90% chance. How many members of Congress are honest?
###Code
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
###Markdown
Modeling and Simulation in PythonChapter 6Copyright 2017 Allen DowneyLicense: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
from pandas import read_html
###Output
_____no_output_____
###Markdown
Code from the previous chapter
###Code
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
table2 = tables[2]
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
p_0 = get_first_value(census)
p_end = get_last_value(census)
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
###Output
_____no_output_____
###Markdown
System objects We can rewrite the code from the previous chapter using system objects.
###Code
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
annual_growth=annual_growth)
###Output
_____no_output_____
###Markdown
And we can encapsulate the code that runs the model in a function.
###Code
def run_simulation1(system):
"""Runs the constant growth model.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = results[t] + system.annual_growth
return results
###Output
_____no_output_____
###Markdown
We can also encapsulate the code that plots the results.
###Code
def plot_results(census, un, timeseries, title):
"""Plot the estimates and the model.
census: TimeSeries of population estimates
un: TimeSeries of population estimates
timeseries: TimeSeries of simulation results
title: string
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(timeseries, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title=title)
###Output
_____no_output_____
###Markdown
Here's how we run it.
###Code
results = run_simulation1(system)
plot_results(census, un, results, 'Constant growth model')
###Output
_____no_output_____
###Markdown
Proportional growth Here's a more realistic model where the number of births and deaths is proportional to the current population.
###Code
def run_simulation2(system):
"""Run a model with proportional birth and death.
system: System object
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
births = system.birth_rate * results[t]
deaths = system.death_rate * results[t]
results[t+1] = results[t] + births - deaths
return results
###Output
_____no_output_____
###Markdown
I picked a death rate that seemed reasonable and then adjusted the birth rate to fit the data.
###Code
system.death_rate = 0.01
system.birth_rate = 0.027
###Output
_____no_output_____
###Markdown
Here's what it looks like.
###Code
results = run_simulation2(system)
plot_results(census, un, results, 'Proportional model')
savefig('figs/chap06-fig01.pdf')
###Output
Saving figure to file figs/chap06-fig01.pdf
###Markdown
The model fits the data pretty well for the first 20 years, but not so well after that. Factoring out the update function `run_simulation1` and `run_simulation2` are nearly identical except for the body of the loop. So we can factor that part out into a function.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
###Output
_____no_output_____
###Markdown
The name `update_func` refers to a function object.
###Code
update_func1
###Output
_____no_output_____
###Markdown
Which we can confirm by checking its type.
###Code
type(update_func1)
###Output
_____no_output_____
###Markdown
`run_simulation` takes the update function as a parameter and calls it just like any other function.
###Code
def run_simulation(system, update_func):
"""Simulate the system using any update function.
system: System object
update_func: function that computes the population next year
returns: TimeSeries
"""
results = TimeSeries()
results[system.t_0] = system.p_0
for t in linrange(system.t_0, system.t_end):
results[t+1] = update_func(results[t], t, system)
return results
###Output
_____no_output_____
###Markdown
Here's how we use it.
###Code
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = census[t_0]
system = System(t_0=t_0,
t_end=t_end,
p_0=p_0,
birth_rate=0.027,
death_rate=0.01)
results = run_simulation(system, update_func1)
plot_results(census, un, results, 'Proportional model, factored')
###Output
_____no_output_____
###Markdown
Remember not to put parentheses after `update_func1`. What happens if you try?
###Code
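# Calling with no arguments raises:
# TypeError: update_func1() missing 3 required positional arguments: 'pop', 't', and 'system'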
update_func1()
###Output
_____no_output_____
###Markdown
**Exercise:** When you run `run_simulation`, it runs `update_func1` once for each year between `t_0` and `t_end`. To see that for yourself, add a print statement at the beginning of `update_func1` that prints the values of `t` and `pop`, then run `run_simulation` again.
###Code
def update_func1(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
    returns: population next year
"""
print('Population: {} Year: {}'.format(pop, t))
births = system.birth_rate * pop
deaths = system.death_rate * pop
return pop + births - deaths
results = run_simulation(system, update_func1)
###Output
Population: 2.557628654 Year: 1950
Population: 2.601108341118 Year: 1951
Population: 2.645327182917006 Year: 1952
Population: 2.6902977450265952 Year: 1953
Population: 2.7360328066920476 Year: 1954
Population: 2.7825453644058125 Year: 1955
Population: 2.829848635600711 Year: 1956
Population: 2.8779560624059233 Year: 1957
Population: 2.926881315466824 Year: 1958
Population: 2.9766382978297603 Year: 1959
Population: 3.0272411488928666 Year: 1960
Population: 3.078704248424045 Year: 1961
Population: 3.131042220647254 Year: 1962
Population: 3.184269938398258 Year: 1963
Population: 3.2384025273510284 Year: 1964
Population: 3.293455370315996 Year: 1965
Population: 3.349444111611368 Year: 1966
Population: 3.406384661508761 Year: 1967
Population: 3.46429320075441 Year: 1968
Population: 3.523186185167235 Year: 1969
Population: 3.583080350315078 Year: 1970
Population: 3.6439927162704344 Year: 1971
Population: 3.7059405924470314 Year: 1972
Population: 3.768941582518631 Year: 1973
Population: 3.833013589421448 Year: 1974
Population: 3.8981748204416125 Year: 1975
Population: 3.9644437923891203 Year: 1976
Population: 4.031839336859735 Year: 1977
Population: 4.100380605586351 Year: 1978
Population: 4.170087075881319 Year: 1979
Population: 4.240978556171301 Year: 1980
Population: 4.313075191626214 Year: 1981
Population: 4.3863974698838595 Year: 1982
Population: 4.460966226871885 Year: 1983
Population: 4.536802652728707 Year: 1984
Population: 4.613928297825096 Year: 1985
Population: 4.692365078888122 Year: 1986
Population: 4.77213528522922 Year: 1987
Population: 4.8532615850781164 Year: 1988
Population: 4.935767032024445 Year: 1989
Population: 5.0196750715688605 Year: 1990
Population: 5.1050095477855315 Year: 1991
Population: 5.191794710097886 Year: 1992
Population: 5.28005522016955 Year: 1993
Population: 5.369816158912433 Year: 1994
Population: 5.461103033613944 Year: 1995
Population: 5.553941785185382 Year: 1996
Population: 5.648358795533533 Year: 1997
Population: 5.744380895057604 Year: 1998
Population: 5.8420353702735826 Year: 1999
Population: 5.941349971568234 Year: 2000
Population: 6.042352921084894 Year: 2001
Population: 6.145072920743337 Year: 2002
Population: 6.249539160395973 Year: 2003
Population: 6.355781326122704 Year: 2004
Population: 6.46382960866679 Year: 2005
Population: 6.573714712014126 Year: 2006
Population: 6.685467862118366 Year: 2007
Population: 6.799120815774378 Year: 2008
Population: 6.9147058696425425 Year: 2009
Population: 7.032255869426465 Year: 2010
Population: 7.151804219206714 Year: 2011
Population: 7.273384890933229 Year: 2012
Population: 7.397032434079093 Year: 2013
Population: 7.522781985458438 Year: 2014
Population: 7.650669279211232 Year: 2015
###Markdown
Combining birth and death Since births and deaths get added up, we don't have to compute them separately. We can combine the birth and death rates into a single net growth rate.
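With $\alpha = b - d$, the update below simplifies to $p_{t+1} = p_t + \alpha\,p_t$.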
###Code
def update_func2(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
net_growth = system.alpha * pop
return pop + net_growth
###Output
_____no_output_____
###Markdown
Here's how it works:
###Code
system.alpha = system.birth_rate - system.death_rate
results = run_simulation(system, update_func2)
plot_results(census, un, results, 'Proportional model, combined birth and death')
###Output
_____no_output_____
###Markdown
Exercises**Exercise:** Maybe the reason the proportional model doesn't work very well is that the growth rate, `alpha`, is changing over time. So let's try a model with different growth rates before and after 1980 (as an arbitrary choice). Write an update function that takes `pop`, `t`, and `system` as parameters. The system object, `system`, should contain two parameters: the growth rate before 1980, `alpha1`, and the growth rate after 1980, `alpha2`. It should use `t` to determine which growth rate to use. Note: Don't forget the `return` statement. Test your function by calling it directly, then pass it to `run_simulation`. Plot the results. Adjust the parameters `alpha1` and `alpha2` to fit the data as well as you can.
###Code
system.alpha
system.alpha1 = .019
system.alpha2 = .0142
# Solution goes here
def update_func3(pop, t, system):
"""Compute the population next year.
pop: current population
t: current year
system: system object containing parameters of the model
returns: population next year
"""
if t <= 1980:
net_growth = system.alpha1 * pop
else:
net_growth = system.alpha2 * pop
return pop + net_growth
# Solution goes here
results = run_simulation(system, update_func3)
plot_results(census, un, results, 'Proportional model, varying growth rate')
###Output
_____no_output_____ |
ICCT_it/examples/03/FD-01_Funzioni_di_trasferimento.ipynb | ###Markdown
Transfer functions In the following example, we will take a look at the formulation of transfer functions. Although they are used in various fields, in control theory transfer functions describe the frequency-transfer characteristics of a linear time-invariant (LTI) system. In general, a transfer function describes the relationship between the Laplace transforms of the input signal $u(t)$ and the output signal $y(t)$: $$U(s)=\mathcal{L}\{u(t)\} \quad Y(s)=\mathcal{L}\{y(t)\}$$ $$Y(s)=G(s)U(s)$$ In the case of LTI systems, the transfer function can be written as a ratio of two polynomials: $$G(s)=\frac{\sum\limits_{j=0}^{m}b_js^j}{\sum\limits_{i=0}^{n}a_is^i},$$ where the denominator $a(s)$ is the characteristic polynomial of the system. Normalizing the coefficients so that $a_n=1$, the resulting forms are: $$a(s) = s^n + a_{n-1}s^{n-1} + a_{n-2}s^{n-2} + ... + a_1s^1 + a_0$$ $$b(s) = b_ms^m + b_{m-1}s^{m-1} + b_{m-2}s^{m-2} + ... + b_1s^1 + b_0$$ For physical systems, the degree of the numerator is lower than the degree of the denominator; this type is called a strictly proper transfer function. In this case, the transfer function converges to zero at infinity. Assemble a transfer function by selecting the polynomial terms!
###Code
b = {}
a = {}
b[0] = w.FloatText(value=1.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
b[1] = w.FloatText(value=1.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
b[2] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
b[3] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
b[4] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
a[0] = w.FloatText(value=10.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
a[1] = w.FloatText(value=1.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
a[2] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
a[3] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
a[4] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
a[5] = w.FloatText(value=0.0, description='', disabled=False, step=0.1, layout=w.Layout(width='11%'))
def transfer_function(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4):
b1c = b1
b2c = b2
b3c = b3
b4c = b4
global b
if a5 == 0:
b[4].disabled=True
b4c = 0
else:
b[4].disabled=False
if a5 == 0 and a4==0:
b[3].disabled=True
b3c = 0
else:
b[3].disabled=False
if a5 == 0 and a4 == 0 and a3 == 0:
b[2].disabled=True
b2c = 0
else:
b[2].disabled=False
if a5 == 0 and a4 == 0 and a3 == 0 and a2 == 0:
b[1].disabled=True
b1c = 0
else:
b[1].disabled=False
G = c.tf([b4c, b3c, b2c, b1c, b0], [a5, a4, a3, a2, a1, a0]) # Transfer function
    print('Transfer function:')
print(G)
input_data = w.interactive_output(transfer_function, {'a0':a[0], 'a1':a[1], 'a2':a[2], 'a3':a[3], 'a4':a[4], 'a5':a[5],
'b0':b[0], 'b1':b[1], 'b2':b[2], 'b3':b[3], 'b4':b[4]})
display(w.HBox([w.VBox([w.Label('$G(s)=$')], layout=w.Layout(justify_content="center", align_items='flex-start')),
w.VBox([w.HBox([b[4], w.Label('$s^4+$'), b[3], w.Label('$s^3+$'), b[2], w.Label('$s^2+$'),
b[1], w.Label('$s+$'), b[0]], layout=w.Layout(justify_content='center')),
w.HBox([w.HTML(value='<hr style="border-top: 1px solid black">', layout=w.Layout(width='100%'))],
layout=w.Layout(justify_content='center')),
w.HBox([a[5], w.Label('$s^5+$'), a[4], w.Label('$s^4+$'), a[3], w.Label('$s^3+$'), a[2], w.Label('$s^2+$'),
a[1], w.Label('$s+$'), a[0]], layout=w.Layout(justify_content='center')) ],
layout=w.Layout(width='70%'))], layout=w.Layout(justify_content='center') ), input_data)
###Output
_____no_output_____
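###Markdown
Outside the interactive widget, the same object can be built directly with the `control` package; a minimal sketch (the coefficients below are arbitrary example values):
###Code
import control as c  # python-control, the same alias used by the widget code above

# G(s) = (s + 1) / (s^2 + s + 10), given as descending-power coefficient lists
G_demo = c.tf([1, 1], [1, 1, 10])
print(G_demo)
###Output
_____no_output_____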
###Markdown
The solutions of the numerator and denominator polynomials are called the zeros and poles of the system, respectively. They can be used to determine the stability of the system and to evaluate its performance. Both the zeros and the poles are either real numbers or complex conjugate pairs (provided the system's differential equations are real-valued). Experiment by varying the polynomial terms and observe how the poles and zeros change in the plot!
###Code
fig1, (f1_ax1) = plt.subplots(1, 1)
fig1.set_size_inches((5, 5))
fig1.set_tight_layout(True)
f1_line1, = f1_ax1.plot([], [], 'rs')
f1_line2, = f1_ax1.plot([], [], 'bo')
f1_ax1.axhline(y=0, color='k', lw=0.5)
f1_ax1.axvline(x=0, color='k', lw=0.5)
f1_ax1.grid(which='both', axis='both', color='lightgray')
f1_ax1.autoscale(enable=True, axis='both')
f1_ax1.set_title('Pole-Zero Map', fontsize=12)
f1_ax1.set_xlabel('Re', labelpad=0, fontsize=10)
f1_ax1.set_ylabel('Im', labelpad=0, fontsize=10)
f1_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
f1_ax1.legend([f1_line1, f1_line2], ['Zeros', 'Poles'])
def pz_map(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4):
b1c = b1
b2c = b2
b3c = b3
b4c = b4
if a5 == 0:
b4c = 0
if a5 == 0 and a4==0:
b3c = 0
if a5 == 0 and a4 == 0 and a3 == 0:
b2c = 0
if a5 == 0 and a4 == 0 and a3 == 0 and a2 == 0:
b1c = 0
G = c.tf([b4c, b3c, b2c, b1c, b0], [a5, a4, a3, a2, a1, a0]) # Transfer function
p = c.pole(G) # Poles
z = c.zero(G) # Zeros
px = [x.real for x in p]
py = [x.imag for x in p]
zx = [x.real for x in z]
zy = [x.imag for x in z]
global f1_line1, f1_line2
try:
f1_ax1.lines.remove(f1_line1)
f1_ax1.lines.remove(f1_line2)
except:
pass
f1_line1, = f1_ax1.plot(zx, zy, 'rs')
f1_line2, = f1_ax1.plot(px, py, 'bo')
f1_ax1.relim()
f1_ax1.autoscale_view()
w.interactive_output(pz_map, {'a0':a[0], 'a1':a[1], 'a2':a[2], 'a3':a[3], 'a4':a[4], 'a5':a[5],
'b0':b[0], 'b1':b[1], 'b2':b[2], 'b3':b[3], 'b4':b[4]})
###Output
_____no_output_____
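###Markdown
The poles and zeros can also be read off numerically; recall that an LTI system is BIBO stable exactly when all of its poles have strictly negative real parts. A minimal sketch, reusing `G_demo` from the sketch above:
###Code
demo_poles = c.pole(G_demo)  # roots of the denominator polynomial
demo_zeros = c.zero(G_demo)  # roots of the numerator polynomial
print('poles:', demo_poles)
print('zeros:', demo_zeros)
print('stable:', all(p.real < 0 for p in demo_poles))
###Output
_____no_output_____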
###Markdown
The time-domain counterpart of the transfer function is the impulse response $g(t)$. Since the Laplace transform of the Dirac delta function is the constant 1, the transfer function of an LTI system can be linked to its impulse response. $$\mathcal{L}\{\delta(t)\} = 1$$ $$1\cdot G(s) \; \xrightarrow{\mathcal{L}^{-1}} \; \delta(t)*g(t)$$ $$Y(s) = G(s)U(s) \; \xrightarrow{\mathcal{L}^{-1}} \; y(t) = \int\limits_{-\infty}^{\infty}g(t-\tau)u(\tau) d\tau,$$ where, by the rules of the Laplace transform, multiplication in the frequency domain corresponds to convolution in the time domain, and the convolution of a function with the Dirac delta returns the same function. Observe the changes in the impulse response for various configurations!
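For a quick static check before the interactive plot below, the impulse response of `G_demo` (from the sketches above) can be computed directly:
###Code
tout_demo, yout_demo = c.impulse_response(G_demo)  # time vector and g(t) samples
print('g(t) at the first few sample times:', yout_demo[:5])
###Output
_____no_output_____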
###Code
fig2, (f2_ax1) = plt.subplots(1, 1)
fig2.set_size_inches((9.8, 5))
fig2.set_tight_layout(True)
f2_line1, = f2_ax1.plot([], [])
f2_ax1.grid(which='both', axis='both', color='lightgray')
f2_ax1.autoscale(enable=True, axis='both')
f2_ax1.set_title('Impulse response', fontsize=12)
f2_ax1.set_xlabel(r'$t$ [s]', labelpad=0, fontsize=10)
f2_ax1.set_ylabel(r'$y(t)$ [/]', labelpad=0, fontsize=10)
f2_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
def inp_resp(a0, a1, a2, a3, a4, a5, b0, b1, b2, b3, b4):
b1c = b1
b2c = b2
b3c = b3
b4c = b4
if a5 == 0:
b4c = 0
if a5 == 0 and a4==0:
b3c = 0
if a5 == 0 and a4 == 0 and a3 == 0:
b2c = 0
if a5 == 0 and a4 == 0 and a3 == 0 and a2 == 0:
b1c = 0
G = c.tf([b4c, b3c, b2c, b1c, b0], [a5, a4, a3, a2, a1, a0]) # Transfer function
tout, yout = c.impulse_response(G)
global f2_line1
try:
f2_ax1.lines.remove(f2_line1)
except:
pass
f2_line1, = f2_ax1.plot(np.concatenate(([0], tout)), np.concatenate(([0], yout)), '-b')
f2_ax1.relim()
f2_ax1.autoscale_view()
w.interactive_output(inp_resp, {'a0':a[0], 'a1':a[1], 'a2':a[2], 'a3':a[3], 'a4':a[4], 'a5':a[5],
'b0':b[0], 'b1':b[1], 'b2':b[2], 'b3':b[3], 'b4':b[4]})
###Output
_____no_output_____ |
.ipynb_checkpoints/Cuaderno3-checkpoint.ipynb | ###Markdown
Testing the Pandas Cheat Sheet Scope of application NumPy is an extended library for the Python language. It supports large multidimensional arrays and matrix operations, and it also provides a large collection of mathematical function libraries for working with arrays. Pandas is a tool built on top of NumPy that was created to solve data-analysis tasks. Pandas incorporates a large number of libraries and some standard data models, providing the tools needed to manipulate large datasets efficiently. Pandas provides a large number of functions and methods that let us process data quickly and easily. Using the Pandas cheat sheets, complete each of the practice exercises.[Pandas Cheat Sheet 1](https://uraccan1-my.sharepoint.com/personal/yesser_miranda_5fxt_d_uraccan_edu_ni/_layouts/15/onedrive.aspx?id=%2Fpersonal%2Fyesser%5Fmiranda%5F5fxt%5Fd%5Furaccan%5Fedu%5Fni%2FDocuments%2FDOCENCIA%2F2021%2FS2%2FFUNDAMENTOS%5FINTELIGENCIA%5FARTIFICIAL%2FLIBROS%2FPandas%5FCheatSheet%5F1%2Epdf&parent=%2Fpersonal%2Fyesser%5Fmiranda%5F5fxt%5Fd%5Furaccan%5Fedu%5Fni%2FDocuments%2FDOCENCIA%2F2021%2FS2%2FFUNDAMENTOS%5FINTELIGENCIA%5FARTIFICIAL%2FLIBROS&originalPath=aHR0cHM6Ly91cmFjY2FuMS1teS5zaGFyZXBvaW50LmNvbS86YjovZy9wZXJzb25hbC95ZXNzZXJfbWlyYW5kYV81Znh0X2RfdXJhY2Nhbl9lZHVfbmkvRWVsVDlZZlFmdFpHcUQyR2ozNHFSQm9CaDZFNXNSSnlaZkJtU1lHc3pmc2N2UT9ydGltZT1kdlVwUklOcTJVZw)[Pandas Cheat Sheet 2](https://uraccan1-my.sharepoint.com/personal/yesser_miranda_5fxt_d_uraccan_edu_ni/_layouts/15/onedrive.aspx?id=%2Fpersonal%2Fyesser%5Fmiranda%5F5fxt%5Fd%5Furaccan%5Fedu%5Fni%2FDocuments%2FDOCENCIA%2F2021%2FS2%2FFUNDAMENTOS%5FINTELIGENCIA%5FARTIFICIAL%2FLIBROS%2FPandas%5FCheatSheet%5F2%2Epdf&parent=%2Fpersonal%2Fyesser%5Fmiranda%5F5fxt%5Fd%5Furaccan%5Fedu%5Fni%2FDocuments%2FDOCENCIA%2F2021%2FS2%2FFUNDAMENTOS%5FINTELIGENCIA%5FARTIFICIAL%2FLIBROS&originalPath=aHR0cHM6Ly91cmFjY2FuMS1teS5zaGFyZXBvaW50LmNvbS86YjovZy9wZXJzb25hbC95ZXNzZXJfbWlyYW5kYV81Znh0X2RfdXJhY2Nhbl9lZHVfbmkvRVZzU2NPUzhpZzFQdXJ2Wmp6YWQ4RzRCYU55eUJueDRKNXNLc0VPMTh0Zk1CQT9ydGltZT04WFhmUW9OcTJVZw) PANDAS CHEAT SHEET 1 Pandas data structures
###Code
import pandas as pd
import numpy as np
#Series — One dimensional labeled array
s = pd.Series([3, -5, 7, 4], index=['a', 'b', 'c', 'd'])
print(s)
#Data Frame — A two dimensional labeled data structure
data = {'Country': ['Belgium', 'India', 'Brazil'],
'Capital': ['Brussels', 'New Delhi', 'Brasília'],
'Population': [11190846, 1303171035, 207847528]}
df = pd.DataFrame(data,
columns=['Country', 'Capital', 'Population'])
print(df)
path='datos_rrss.xlsx'
###Output
_____no_output_____
###Markdown
Read and Write to CSV
###Code
#Read CSV file
#pd.read_csv('file.csv', header=None, nrows=5)
#Write to CSV file
#df.to_csv('myDataFrame.csv')
pd.read_csv('datos_rrss_t1.csv')
###Output
_____no_output_____
###Markdown
Read and Write to Excel
###Code
#Read Excel file
#pd.read_excel('file.xlsx')
#Write to Excel file
#df.to_excel('dir/myDataFrame.xlsx', sheet_name='Sheet1')
### Read multiple sheets from the same file
#xlsx = pd.ExcelFile('file.xls')
#df = pd.read_excel(xlsx, 'Sheet1')
pd.read_excel('datos_rrss.xlsx')
###Output
_____no_output_____
###Markdown
Asking for Help
###Code
help(pd.Series.loc)
###Output
Help on property:
Access a group of rows and columns by label(s) or a boolean array.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'``.
.. warning:: Note that contrary to usual python slices, **both** the
start and the stop are included
- A boolean array of the same length as the axis being sliced,
e.g. ``[True, False, True]``.
- A ``callable`` function with one argument (the calling Series or
DataFrame) and that returns valid output for indexing (one of the above)
See more at :ref:`Selection by Label <indexing.label>`
Raises
------
KeyError:
when any items are not found
See Also
--------
DataFrame.at : Access a single value for a row/column label pair.
DataFrame.iloc : Access group of rows and columns by integer position(s).
DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the
Series/DataFrame.
Series.loc : Access group of values using labels.
Examples
--------
**Getting values**
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
Single label. Note this returns the row as a Series.
>>> df.loc['viper']
max_speed 4
shield 5
Name: viper, dtype: int64
List of labels. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[['viper', 'sidewinder']]
max_speed shield
viper 4 5
sidewinder 7 8
Single label for row and column
>>> df.loc['cobra', 'shield']
2
Slice with labels for row and single label for column. As mentioned
above, note that both the start and stop of the slice are included.
>>> df.loc['cobra':'viper', 'max_speed']
cobra 1
viper 4
Name: max_speed, dtype: int64
Boolean list with the same length as the row axis
>>> df.loc[[False, False, True]]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series with column labels specified
>>> df.loc[df['shield'] > 6, ['max_speed']]
max_speed
sidewinder 7
Callable that returns a boolean Series
>>> df.loc[lambda df: df['shield'] == 8]
max_speed shield
sidewinder 7 8
**Setting values**
Set value for all items matching the list of labels
>>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
>>> df
max_speed shield
cobra 1 2
viper 4 50
sidewinder 7 50
Set value for an entire row
>>> df.loc['cobra'] = 10
>>> df
max_speed shield
cobra 10 10
viper 4 50
sidewinder 7 50
Set value for an entire column
>>> df.loc[:, 'max_speed'] = 30
>>> df
max_speed shield
cobra 30 10
viper 30 50
sidewinder 30 50
Set value for rows matching callable condition
>>> df.loc[df['shield'] > 35] = 0
>>> df
max_speed shield
cobra 30 10
viper 0 0
sidewinder 0 0
**Getting values on a DataFrame with an index that has integer labels**
Another example using integers for the index
>>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=[7, 8, 9], columns=['max_speed', 'shield'])
>>> df
max_speed shield
7 1 2
8 4 5
9 7 8
Slice with integer labels for rows. As mentioned above, note that both
the start and stop of the slice are included.
>>> df.loc[7:9]
max_speed shield
7 1 2
8 4 5
9 7 8
**Getting values with a MultiIndex**
A number of examples using a DataFrame with a MultiIndex
>>> tuples = [
... ('cobra', 'mark i'), ('cobra', 'mark ii'),
... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),
... ('viper', 'mark ii'), ('viper', 'mark iii')
... ]
>>> index = pd.MultiIndex.from_tuples(tuples)
>>> values = [[12, 2], [0, 4], [10, 20],
... [1, 4], [7, 1], [16, 36]]
>>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index)
>>> df
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Single label. Note this returns a DataFrame with a single index.
>>> df.loc['cobra']
max_speed shield
mark i 12 2
mark ii 0 4
Single index tuple. Note this returns a Series.
>>> df.loc[('cobra', 'mark ii')]
max_speed 0
shield 4
Name: (cobra, mark ii), dtype: int64
Single label for row and column. Similar to passing in a tuple, this
returns a Series.
>>> df.loc['cobra', 'mark i']
max_speed 12
shield 2
Name: (cobra, mark i), dtype: int64
Single tuple. Note using ``[[]]`` returns a DataFrame.
>>> df.loc[[('cobra', 'mark ii')]]
max_speed shield
cobra mark ii 0 4
Single tuple for the index with a single label for the column
>>> df.loc[('cobra', 'mark i'), 'shield']
2
Slice from index tuple to single label
>>> df.loc[('cobra', 'mark i'):'viper']
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
mark iii 16 36
Slice from index tuple to index tuple
>>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')]
max_speed shield
cobra mark i 12 2
mark ii 0 4
sidewinder mark i 10 20
mark ii 1 4
viper mark ii 7 1
###Markdown
Selection Getting
###Code
#Get one element
s['b']
#Get subset of a DataFrame
df[1:]
###Output
_____no_output_____
###Markdown
Selecting, Boolean Indexing & Setting By Position
###Code
#Select single value by row & Column
df.iloc[0,0]
df.iat[0,0]
###Output
_____no_output_____
###Markdown
By label
###Code
#Select single value by row and column labels
df.loc[0,'Country']
df.at[0,'Country']
###Output
_____no_output_____
###Markdown
By Label/Position
###Code
#.ix is deprecated; use .loc for labels and .iloc for positions
#Select single row of subset rows
df.iloc[2]
#Select a single column of subset of columns
df.loc[:,'Capital']
#Select rows and columns
df.loc[1,'Capital']
###Output
_____no_output_____
###Markdown
Boolean Indexing
###Code
#Series s where value is not >1
s[~(s > 1)]
#s where value is <-1 or >2
s[(s < -1) | (s > 2)]
#Use filter to adjust DataFrame
df[df['Population']>1200000000]
###Output
_____no_output_____
###Markdown
Setting
###Code
# Set index a of Series s to 6
s['a'] = 6
print(s)
###Output
a 6
b -5
c 7
d 4
dtype: int64
###Markdown
Read and Write to SQL Query or Database Table
###Code
#Read and write with SQLAlchemy
from sqlalchemy import create_engine
engine = create_engine('sqlite:///:memory:')
#Write to a SQL table first (to_sql is a DataFrame method, not pd.to_sql)
df.to_sql('my_table', engine)
#Read SQL query / table
pd.read_sql("SELECT * FROM my_table;", engine)
pd.read_sql_table('my_table', engine)
pd.read_sql_query("SELECT * FROM my_table;", engine)
###Output
_____no_output_____
###Markdown
Droppping
###Code
#values
print(s)
#Drop values from rows (axis=0)
s.drop(['a', 'c'])
#Drop values from columns(axis=1)
df.drop('Country', axis=1)
###Output
_____no_output_____
###Markdown
Sort & Rank
###Code
#Values df
print(df)
#Sort by labels along an axis
df.sort_index()
#Sort by the values along an axis
df.sort_values(by='Country')
#Assign ranks to entries
df.rank()
###Output
_____no_output_____
###Markdown
Retrieving Series/DataFrame Information Basic Information
###Code
#Values df
print(df)
#(rows,columns)
df.shape
#Describe index
df.index
#Describe DataFrame columns
df.columns
#Info on DataFrame
df.info()
#Number of non-NA values
df.count()
###Output
_____no_output_____
###Markdown
Summary
###Code
#Sum of values
df.sum()
#Cummulative sum of values
df.cumsum()
#Minimum
df.min()
#max values
df.max()
#Minimum/Maximum index value
df.idxmin()
df.idxmax()
#Summary statistics
df.describe()
#Mean of values
df.mean()
#Median of values
df.median()
###Output
_____no_output_____
###Markdown
Applying Functions
###Code
#Apply function
df.apply(lambda x: x*2)
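#Apply function element-wise
df.applymap(lambda x: x*2)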
###Output
_____no_output_____
###Markdown
Data Alignment
###Code
s3 = pd.Series([7, -2, 3], index=['a', 'c', 'd'])
s + s3
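#fill_value is substituted for a label missing from one of the Series before computing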
s.sub(s3, fill_value=2)
s.div(s3, fill_value=4)
s.mul(s3, fill_value=3)
###Output
_____no_output_____
###Markdown
Pandas Cheat Sheet 2 Reshaping Data Pivot
###Code
import pandas as pd
###Declaration of df
df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
'population': [1864, 22000, 80000]},
index=['panda', 'polar', 'koala'])
#Data Frame — A two dimensional labeled data structure
data = {'Date': ['2016-03-01','2016-03-02','2016-03-01','2016-03-03','2016-03-02','2016-03-03'],
'Type': ['a','b','c','a','a','c'],
'Value': [11.432,13.031,20.784,99.906,1.303,20.784]}
df2 = pd.DataFrame(data,
columns=['Date', 'Type', 'Value'])
print(df2)
#Spread rows into columns
#Spread rows into columns
df3= df2.pivot(index='Date',
columns='Type',
values='Value')
print(df3)
###Output
Type a b c
Date
2016-03-01 11.432 NaN 20.784
2016-03-02 1.303 13.031 NaN
2016-03-03 99.906 NaN 20.784
###Markdown
Pivot Table
###Code
df4= pd.pivot_table(df2,
values='Value',
index='Date',
columns='Type')
print(df4)
# Pivot a level of column labels
#Gather columns into rows
pd.melt(df2,
id_vars=["Date"],
value_vars=["Type","Value"],
value_name="Observations")
##Column-index,series pairs
df.iteritems()
#Row-index,series pairs
df.iterrows()
###Output
_____no_output_____
###Markdown
Advanced Indexing Selecting
###Code
#Select cols with any vals >1
df3.loc[:,(df3>1).any()]
#Select cols with vals>1
df3.loc[:,(df3>1).all()]
#Select cols with NaN
df3.loc[:,df3.isnull().any()]
#Select cols without NAN
df3.loc[:,df3.notnull().all()]
###Output
_____no_output_____
###Markdown
Indexing With isin
###Code
#Find same elements
df[(df.Country.isin(df2.Type))]
#Filter on values
df3.filter(items=["a","b"])
#Select specific elements (df.select is deprecated; use .loc instead)
df.loc[[i for i in df.index if not i % 5]]
###Output
_____no_output_____
###Markdown
Where
###Code
#Subset the data
s.where(s > 0)
###Output
_____no_output_____
###Markdown
Query
###Code
#Query DataFrame (df6 is created in the MultiIndexing section below)
df6.query('second > first')
###Output
_____no_output_____
###Markdown
Setting/Resetting Index
###Code
# Set the index
df.set_index('Country')
#Reset the index
df4 = df.reset_index()
print(df4)
#Renamme DataFrame
df = df.rename(index=str,
columns={"Country":"cntry",
"Capital":"cptl",
"Population":"ppltn"})
print(df)
###Output
cntry cptl ppltn
0 Belgium Brussels 11190846
1 India New Delhi 1303171035
2 Brazil Brasília 207847528
###Markdown
Reindexing
###Code
s2 = s.reindex(['a','c','d','e','b'])
print(s2)
#Forward Filling
df.reindex(range(4),
           method='ffill')
#Backward Filling
s3 = s.reindex(range(5),
method='bfill')
###Output
_____no_output_____
###Markdown
MultiIndexing
###Code
arrays = [np.array([1,2,3]),
np.array([5,4,3])]
df5= pd.DataFrame(np.random.rand(3, 2), index=arrays)
tuples = list(zip(*arrays))
index = pd.MultiIndex.from_tuples(tuples,
names=['first', 'second'])
df6= pd.DataFrame(np.random.rand(3, 2), index=index)
df2.set_index(["Date", "Type"])
###Output
_____no_output_____
###Markdown
Duplicate Data
###Code
#Return unique values
s3.unique()
print(s3)
#Check duplicates
df2.duplicated('Type')
#Drop duplicates
df2.drop_duplicates('Type', keep='last')
#Check index duplicates
df.index.duplicated()
###Output
_____no_output_____
###Markdown
Grouping Data Aggregation
###Code
df2.groupby(by=['Date','Type']).mean()
df4.groupby(level=0).sum()
df4.groupby(level=0).agg({'a':lambda x:sum(x)/len(x),
'b': np.sum})
###Output
_____no_output_____
###Markdown
Transformation
###Code
customSum = lambda x: (x+x%2)
df4.groupby(level=0).transform(customSum)
###Output
_____no_output_____
###Markdown
Missing Data
###Code
#Drop NaN values
df.dropna()
#Fill NaN values with a predetermined value
df3.fillna(df3.mean())
#Replace values with others
df2.replace("a","f")
###Output
_____no_output_____
###Markdown
Combining Data
###Code
data1 = pd.DataFrame({'X1': ['a','b','c'], 'X2': [11.432,1.303, 99.906]}); data1
data2 = pd.DataFrame({'X1': ['a','b','d'], 'X3': [20.78,"NaN", 20.784]}); data2
print(data1)
print(data2)
###Output
X1 X2
0 a 11.432
1 b 1.303
2 c 99.906
X1 X3
0 a 20.78
1 b NaN
2 d 20.784
###Markdown
Merge
###Code
pd.merge(data1,
data2,
how='left',
on='X1')
pd.merge(data1,
data2,
how='right',
on='X1')
pd.merge(data1,
data2,
how='inner',
on='X1')
pd.merge(data1,
data2,
how='outer',
on='X1')
###Output
_____no_output_____
###Markdown
Join
###Code
data1.join(data2, how='right')
###Output
_____no_output_____
###Markdown
Concatenate
###Code
#Vertical
s.append(s2)
#Horizontal/vertical
pd.concat([s,s2],axis=1, keys=['One','Two'])
pd.concat([data1, data2], axis=1, join='inner')
###Output
_____no_output_____
###Markdown
Dates
###Code
df2['Date']= pd.to_datetime(df2['Date'])
df2['Date']= pd.date_range('2000-1-1',
                            periods=6,
                            freq='M')
from datetime import datetime  # needed for the examples below
dates = [datetime(2012,5,1), datetime(2012,5,2)]
index = pd.DatetimeIndex(dates)
end = datetime(2012,12,1)  # example value; `end` was left undefined in the original snippet
index = pd.date_range(datetime(2012,2,1), end, freq='BM')
###Output
_____no_output_____
###Markdown
Visualization
###Code
import matplotlib.pyplot as plt
s.plot()
plt.show()
print(s)
df2.plot()
plt.show()
###Output
_____no_output_____ |
OvernightReview1.ipynb | ###Markdown
Import and format the pickled Bayer grid data below
###Code
# load our pickled stream object containing the image data
import io
import pickle
import numpy as np
import matplotlib.pyplot as plt
with open('wall2.pickle','rb') as f:
    stream = pickle.load(f)
# # alternatively, just open the jpeg file (also works)
# with open('wall1.jpeg', 'rb') as f:
# stream = io.BytesIO(f.read())
assert isinstance(stream, io.BytesIO)
ver = 1 # we used a v1 camera module for this image. Use `2` for v2
offset = {
1: 6404096,
2: 10270208,
}[ver]
data = stream.getvalue()[-offset:]
assert data[:4] == b'BRCM'
data = data[32768:]
data = np.frombuffer(data, dtype=np.uint8)
# For the V1 module, the data consists of 1952 rows of 3264 bytes of data.
# The last 8 rows of data are unused (they only exist because the maximum
# resolution of 1944 rows is rounded up to the nearest 16).
#
# For the V2 module, the data consists of 2480 rows of 4128 bytes of data.
# There's actually 2464 rows of data, but the sensor's raw size is 2466
# rows, rounded up to the nearest multiple of 16: 2480.
#
# Likewise, the last few bytes of each row are unused (why?). Here we
# reshape the data and strip off the unused bytes.
reshape, crop = {
1: ((1952, 3264), (1944, 3240)),
2: ((2480, 4128), (2464, 4100)),
}[ver]
data = data.reshape(reshape)[:crop[0], :crop[1]]
# Horizontally, each row consists of 10-bit values. Every four bytes are
# the high 8-bits of four values, and the 5th byte contains the packed low
# 2-bits of the preceding four values. In other words, the bits of the
# values A, B, C, D and arranged like so:
#
# byte 1 byte 2 byte 3 byte 4 byte 5
# AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD AABBCCDD
#
# Here, we convert our data into a 16-bit array, shift all values left by
# 2-bits and unpack the low-order bits from every 5th byte in each row,
# then remove the columns containing the packed bits
data = data.astype(np.uint16) << 2
for byte in range(4):
data[:, byte::5] |= ((data[:, 4::5] >> ((4 - byte) * 2)) & 0b11)
data = np.delete(data, np.s_[4::5], 1)
# Now to split the data up into its red, green, and blue components. The
# Bayer pattern of the OV5647 sensor is BGGR. In other words the first
# row contains alternating green/blue elements, the second row contains
# alternating red/green elements, and so on as illustrated below:
#
# GBGBGBGBGBGBGB
# RGRGRGRGRGRGRG
# GBGBGBGBGBGBGB
# RGRGRGRGRGRGRG
#
# Please note that if you use vflip or hflip to change the orientation
# of the capture, you must flip the Bayer pattern accordingly
rgb = np.zeros(data.shape + (3,), dtype=data.dtype)
rgb[1::2, 0::2, 0] = data[1::2, 0::2] # Red
rgb[0::2, 0::2, 1] = data[0::2, 0::2] # Green
rgb[1::2, 1::2, 1] = data[1::2, 1::2] # Green
rgb[0::2, 1::2, 2] = data[0::2, 1::2] # Blue
uint16_to_uint8 = lambda a: (a * (255/1023)).astype(np.uint8) # note, this only works b/c the values are actually 10-bit
# uint16_to_uint8 = lambda a: (a >> 2).astype(np.uint8) # or bit-shift as suggested at the end
rgb8 = uint16_to_uint8(rgb)
np.max(rgb8)
assert rgb8.dtype == np.uint8
###Output
_____no_output_____
###Markdown
Define the pattern of the Bayer grid in order to find the value each pixel needs to be normalized by
###Code
# rgb8.shape
# alternatively, convolution?
from scipy.signal import convolve
bayer = np.zeros(rgb.shape, dtype=np.uint8)
bayer[1::2, 0::2, 0] = 1 # Red
bayer[0::2, 0::2, 1] = 1 # Green
bayer[1::2, 1::2, 1] = 1 # Green
bayer[0::2, 1::2, 2] = 1 # Blue
###Output
_____no_output_____
###Markdown
Define a 3x3 kernel of ones to convolve with the mosaiced image, thereby implementing nearest-neighbor interpolation.
###Code
kernel = np.ones((3,3),dtype=np.uint8)
bayer_conv = np.zeros(bayer.shape, dtype=np.uint8)
for i in range(3):
bayer_conv[:,:,i] = convolve(bayer[:,:,i], kernel, mode='same')
bayer_conv[:3,:3,0] # peek at top left corner of r plane
rgb8_conv = np.zeros(rgb8.shape, dtype=np.uint16) # max sum here should be 1275 for 5 maxed green sencels
for i in range(3):
rgb8_conv[:,:,i] = convolve(rgb8[:,:,i].astype(np.uint16), kernel, mode='same')
res = rgb8_conv / bayer_conv
res = res.astype(np.uint8)
plt.imshow(res)
import copy
testIm = copy.deepcopy(res)
###Output
_____no_output_____
###Markdown
Flatten the three channels into a grayscale image
###Code
m = testIm.shape[0]
n = testIm.shape[1]
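# standard ITU-R BT.601 luma weights used by rgb2gray below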
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
grayIm = rgb2gray(testIm)
plt.imshow(grayIm, cmap = 'gray')
vectIm = grayIm.reshape(m*n, 1)
# vectIm.shape
###Output
_____no_output_____
###Markdown
Create a histogram of all the pixel values in the grayscale image
###Code
plt.hist(vectIm, bins = 40)
###Output
_____no_output_____
###Markdown
Use the minimum and maximum pixel values to rescale the image to the full 0-255 range
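The operation below maps the observed pixel range onto the full 8-bit range: $$I_{\text{shifted}} = \frac{(I - I_{\min})\cdot 255}{I_{\max} - I_{\min}}$$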
###Code
pixRange = vectIm.max() - vectIm.min()
print(pixRange)
shiftIm = (grayIm - vectIm.min())*255/pixRange
plt.imshow(shiftIm, cmap = 'gray')
plt.hist(shiftIm.reshape(m*n, 1), bins = 40)
###Output
_____no_output_____ |
dev/22_vision_learner.ipynb | ###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case, this function returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Num features
###Code
#export
def in_channels(m):
"Return the shape of the first weight layer in `m`."
for l in flatten_model(m):
if hasattr(l, 'weight'): return l.weight.shape[1]
raise Exception('No weight layer')
test_eq(in_channels(nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))), 5)
test_eq(in_channels(nn.Sequential(nn.AvgPool2d(4), nn.Conv2d(4,3,3))), 4)
test_fail(lambda : in_channels(nn.Sequential(nn.AvgPool2d(4))))
#export
def num_features_model(m):
"Return the number of output features for `m`."
sz = 32
ch_in = in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
with hook_output(m) as hook:
_ = m.eval()(one_param(m).new(1, ch_in, sz, sz).requires_grad_(False).uniform_(-1.,1.))
return hook.stored.shape[1]
except Exception as e:
sz *= 2
if sz > 2048: raise
m = nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))
test_eq(num_features_model(m), 3)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
return nn.Sequential(body, head)
tst = create_cnn_model(models.resnet18, 10, None)
tst[0]
from local.data.block import *
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
pets = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", ds_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
dl_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1]).map(params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, cut=None, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None, splitter=trainable_params, bn_final=False,
init=nn.init.kaiming_normal_, concat_pool=True, **kwargs):
"Build convnet style learner."
    meta = model_meta.get(arch, _default_meta)  # fall back to the default meta for unknown archs
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, lin_ftrs, ps=ps, custom_head=custom_head,
bn_final=bn_final, concat_pool=concat_pool)
learn = Learner(dbunch, model, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
if init: apply_init(model[1], init)
return learn
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat())
###Output
_____no_output_____
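###Markdown
From here the usual transfer-learning loop applies. A minimal sketch, assuming the one-cycle schedule from notebook 14 is loaded; the epoch counts and learning rates are illustrative only.
###Code
learn.fit_one_cycle(1)  # train the new head; the pretrained body was frozen above
learn.unfreeze()        # then make the whole model trainable
learn.fit_one_cycle(1, lr_max=slice(1e-5, 1e-3))  # fine-tune with discriminative learning rates
###Output
_____no_output_____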
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif isinstance(cut, Callable): return cut(model)
else: raise NamedError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the coresponding layer, or a function, in which case, this funciton returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Num features
###Code
#export
def in_channels(m):
"Return the shape of the first weight layer in `m`."
for l in flatten_model(m):
if hasattr(l, 'weight'): return l.weight.shape[1]
raise Exception('No weight layer')
test_eq(in_channels(nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))), 5)
test_eq(in_channels(nn.Sequential(nn.AvgPool2d(4), nn.Conv2d(4,3,3))), 4)
test_fail(lambda : in_channels(nn.Sequential(nn.AvgPool2d(4))))
#export
def num_features_model(m):
"Return the number of output features for `m`."
sz = 32
ch_in = in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
with hook_output(m) as hook:
_ = m.eval()(one_param(m).new(1, ch_in, sz, sz).requires_grad_(False).uniform_(-1.,1.))
return hook.stored.shape[1]
except Exception as e:
sz *= 2
if sz > 2048: raise
m = nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))
test_eq(num_features_model(m), 3)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
return nn.Sequential(body, head)
tst = create_cnn_model(models.resnet18, 10, None)
tst[0]
from local.data.block import *
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
pets = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", ds_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
dl_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1]).mapped(trainable_params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1]).mapped(trainable_params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1]).mapped(trainable_params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1]).mapped(trainable_params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1]).mapped(trainable_params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1]).mapped(trainable_params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, cut=None, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None, splitter=trainable_params, bn_final=False,
init=nn.init.kaiming_normal_, concat_pool=True, **kwargs):
"Build convnet style learner."
meta = model_meta.get(arch)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, lin_ftrs, ps=ps, custom_head=custom_head,
bn_final=bn_final, concat_pool=concat_pool)
learn = Learner(dbunch, model, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
if init: apply_init(model[1], init)
return learn
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat())
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 01b_script.ipynb.
Converted 01c_dataloader.ipynb.
Converted 02_data_transforms.ipynb.
Converted 03_data_pipeline.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_source.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications> All the functions necessary to build `Learner` suitable for transfer learning in computer vision Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
else: raise NamedError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the coresponding layer, or a function, in which case, this funciton returns `cut(model)`. It defaults to `cnn_config(arch)['cut']` if `arch` is in `cnn_config`, otherwise to the first layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Num features
###Code
#export
def in_channels(m):
"Return the shape of the first weight layer in `m`."
for l in flatten_model(m):
if hasattr(l, 'weight'): return l.weight.shape[1]
raise Exception('No weight layer')
test_eq(in_channels(nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))), 5)
test_eq(in_channels(nn.Sequential(nn.AvgPool2d(4), nn.Conv2d(4,3,3))), 4)
test_fail(lambda : in_channels(nn.Sequential(nn.AvgPool2d(4))))
#export
def num_features_model(m):
"Return the number of output features for `m`."
sz = 32
ch_in = in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
with hook_output(m) as hook:
_ = m.eval()(one_param(m).new(1, ch_in, sz, sz).requires_grad_(False).uniform_(-1.,1.))
return hook.stored.shape[1]
except Exception as e:
sz *= 2
if sz > 2048: raise
m = nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))
test_eq(num_features_model(m), 3)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
return nn.Sequential(body, head)
tst = create_cnn_model(models.resnet18, 10, None)
tst[0]
from local.data.block import *
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
pets = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", ds_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
dl_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1]).mapped(trainable_params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1]).mapped(trainable_params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1]).mapped(trainable_params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1]).mapped(trainable_params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1]).mapped(trainable_params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1]).mapped(trainable_params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, cut=None, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None, splitter=trainable_params, bn_final=False,
init=nn.init.kaiming_normal_, concat_pool=True, **kwargs):
"Build convnet style learner."
    meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, lin_ftrs, ps=ps, custom_head=custom_head,
bn_final=bn_final, concat_pool=concat_pool)
learn = Learner(dbunch, model, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
if init: apply_init(model[1], init)
return learn
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat())
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications

> All the functions necessary to build `Learner` suitable for transfer learning in computer vision

Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. If `cut` is `None`, the model is cut just before the last top-level layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Num features
###Code
#export
def in_channels(m):
"Return the shape of the first weight layer in `m`."
for l in flatten_model(m):
if hasattr(l, 'weight'): return l.weight.shape[1]
raise Exception('No weight layer')
test_eq(in_channels(nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))), 5)
test_eq(in_channels(nn.Sequential(nn.AvgPool2d(4), nn.Conv2d(4,3,3))), 4)
test_fail(lambda : in_channels(nn.Sequential(nn.AvgPool2d(4))))
#export
def num_features_model(m):
"Return the number of output features for `m`."
sz = 32
ch_in = in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
with hook_output(m) as hook:
_ = m.eval()(one_param(m).new(1, ch_in, sz, sz).requires_grad_(False).uniform_(-1.,1.))
return hook.stored.shape[1]
except Exception as e:
sz *= 2
if sz > 2048: raise
m = nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))
test_eq(num_features_model(m), 3)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
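    # If a single dropout probability is given, use half of it for all intermediate layers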
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
return nn.Sequential(body, head)
tst = create_cnn_model(models.resnet18, 10, None)
tst[0]
from local.data.block import *
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
pets = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", ds_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
dl_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1]).map(params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, cut=None, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None, splitter=None, bn_final=False,
init=nn.init.kaiming_normal_, concat_pool=True, **kwargs):
"Build convnet style learner."
meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, lin_ftrs, ps=ps, custom_head=custom_head,
bn_final=bn_final, concat_pool=concat_pool)
learn = Learner(dbunch, model, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
if init: apply_init(model[1], init)
return learn
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat())
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 02_script.ipynb.
Converted 03_dataloader.ipynb.
Converted 04_transform.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_transforms.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 37_text_learner.ipynb.
Converted 38_tutorial_ulmfit.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 42_tabular_rapids.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
###Markdown
Learner for the vision applications

> All the functions necessary to build `Learner` suitable for transfer learning in computer vision

Cut a pretrained model
###Code
# export
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
test_eq([bool(_is_pool_type(m_)) for m_ in m.children()], [True,False,False,True])
# export
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
m = nn.Sequential(nn.AdaptiveAvgPool2d(5), nn.Linear(2,3), nn.Conv2d(2,3,1), nn.MaxPool3d(5))
assert has_pool_type(m)
test_eq([has_pool_type(m_) for m_ in m.children()], [True,False,False,True])
#export
def create_body(arch, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
    elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
###Output
_____no_output_____
###Markdown
`cut` can either be an integer, in which case we cut the model at the corresponding layer, or a function, in which case this function returns `cut(model)`. If `cut` is `None`, the model is cut just before the last top-level layer that contains some pooling.
###Code
tst = lambda p : nn.Sequential(nn.Conv2d(4,5,3), nn.BatchNorm2d(5), nn.AvgPool2d(1), nn.Linear(3,4))
m = create_body(tst)
test_eq(len(m), 2)
m = create_body(tst, cut=3)
test_eq(len(m), 3)
m = create_body(tst, cut=noop)
test_eq(len(m), 4)
###Output
_____no_output_____
###Markdown
Num features
###Code
#export
def in_channels(m):
"Return the shape of the first weight layer in `m`."
for l in flatten_model(m):
if hasattr(l, 'weight'): return l.weight.shape[1]
raise Exception('No weight layer')
test_eq(in_channels(nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))), 5)
test_eq(in_channels(nn.Sequential(nn.AvgPool2d(4), nn.Conv2d(4,3,3))), 4)
test_fail(lambda : in_channels(nn.Sequential(nn.AvgPool2d(4))))
#export
def num_features_model(m):
"Return the number of output features for `m`."
sz = 32
ch_in = in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
with hook_output(m) as hook:
_ = m.eval()(one_param(m).new(1, ch_in, sz, sz).requires_grad_(False).uniform_(-1.,1.))
return hook.stored.shape[1]
except Exception as e:
sz *= 2
if sz > 2048: raise
m = nn.Sequential(nn.Conv2d(5,4,3), nn.Conv2d(4,3,3))
test_eq(num_features_model(m), 3)
###Output
_____no_output_____
###Markdown
Head and model
###Code
#export
def create_head(nf, nc, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = L(ps)
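    # If a single dropout probability is given, use half of it for all intermediate layers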
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += BnDropLin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
tst = create_head(5, 10)
tst
#export
def create_cnn_model(arch, nc, cut, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None,
bn_final=False, concat_pool=True):
"Create custom convnet architecture using `base_arch`"
body = create_body(arch, pretrained, cut)
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, nc, lin_ftrs, ps=ps, concat_pool=concat_pool, bn_final=bn_final)
else: head = custom_head
return nn.Sequential(body, head)
tst = create_cnn_model(models.resnet18, 10, None)
tst[0]
from local.data.block import *
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
imagenet_stats = broadcast_vec(1, 4, *imagenet_stats)
pets = DataBlock(ts=(PILImage, Category),
get_items=get_image_files,
splitter=RandomSplitter(),
get_y=RegexLabeller(pat = r'/([^/]+)_\d+.jpg$'))
dbunch = pets.databunch(untar_data(URLs.PETS)/"images", ds_tfms=RandomResizedCrop(300, min_scale=0.5), bs=64,
dl_tfms=[*aug_transforms(size=224), Normalize(*imagenet_stats)])
get_c(dbunch)
dbunch.show_batch(max_n=9)
#export
def _default_split(m:nn.Module): return L(m[0], m[1]).mapped(trainable_params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1]).mapped(trainable_params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1]).mapped(trainable_params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1]).mapped(trainable_params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1]).mapped(trainable_params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1]).mapped(trainable_params)
_default_meta = {'cut':None, 'split':_default_split}
_resnet_meta = {'cut':-2, 'split':_resnet_split }
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split}
_densenet_meta = {'cut':-1, 'split':_densenet_split}
_vgg_meta = {'cut':-2, 'split':_vgg_split}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split}
#export
model_meta = {
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
#export
@delegates(Learner.__init__)
def cnn_learner(dbunch, arch, cut=None, pretrained=True, lin_ftrs=None, ps=0.5, custom_head=None, splitter=trainable_params, bn_final=False,
init=nn.init.kaiming_normal_, concat_pool=True, **kwargs):
"Build convnet style learner."
    meta = model_meta.get(arch, _default_meta)
model = create_cnn_model(arch, get_c(dbunch), ifnone(cut, meta['cut']), pretrained, lin_ftrs, ps=ps, custom_head=custom_head,
bn_final=bn_final, concat_pool=concat_pool)
    learn = Learner(dbunch, model, splitter=ifnone(splitter, meta['split']), **kwargs)
if pretrained: learn.freeze()
if init: apply_init(model[1], init)
return learn
learn = cnn_learner(dbunch, models.resnet34, loss_func=CrossEntropyLossFlat())
###Output
_____no_output_____
###Markdown
Export -
###Code
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
###Output
Converted 00_test.ipynb.
Converted 01_core.ipynb.
Converted 01a_torch_core.ipynb.
Converted 01b_script.ipynb.
Converted 01c_dataloader.ipynb.
Converted 02_data_transforms.ipynb.
Converted 03_data_pipeline.ipynb.
Converted 05_data_core.ipynb.
Converted 06_data_source.ipynb.
Converted 07_vision_core.ipynb.
Converted 08_pets_tutorial.ipynb.
Converted 09_vision_augment.ipynb.
Converted 11_layers.ipynb.
Converted 11a_vision_models_xresnet.ipynb.
Converted 12_optimizer.ipynb.
Converted 13_learner.ipynb.
Converted 14_callback_schedule.ipynb.
Converted 15_callback_hook.ipynb.
Converted 16_callback_progress.ipynb.
Converted 17_callback_tracker.ipynb.
Converted 18_callback_fp16.ipynb.
Converted 19_callback_mixup.ipynb.
Converted 20_metrics.ipynb.
Converted 21_tutorial_imagenette.ipynb.
Converted 22_vision_learner.ipynb.
Converted 23_tutorial_transfer_learning.ipynb.
Converted 30_text_core.ipynb.
Converted 31_text_data.ipynb.
Converted 32_text_models_awdlstm.ipynb.
Converted 33_text_models_core.ipynb.
Converted 34_callback_rnn.ipynb.
Converted 35_tutorial_wikitext.ipynb.
Converted 36_text_models_qrnn.ipynb.
Converted 40_tabular_core.ipynb.
Converted 41_tabular_model.ipynb.
Converted 50_data_block.ipynb.
Converted 90_notebook_core.ipynb.
Converted 91_notebook_export.ipynb.
Converted 92_notebook_showdoc.ipynb.
Converted 93_notebook_export2html.ipynb.
Converted 94_index.ipynb.
Converted 95_utils_test.ipynb.
Converted 96_data_external.ipynb.
Converted notebook2jekyll.ipynb.
Converted tmp.ipynb.
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/custom_layers_and_models.ipynb | ###Markdown
Making New Layers and Models via Subclassing

Learning Objectives
* Use the Layer class as the combination of state (weights) and computation.
* Defer weight creation until the shape of the inputs is known.
* Build recursively composable layers.
* Compute loss using the add_loss() method.
* Compute a running average using the add_metric() method.
* Enable serialization on layers.

Introduction

This tutorial shows how to build new layers and models via [subclassing](https://towardsdatascience.com/model-sub-classing-and-custom-training-loop-from-scratch-in-tensorflow-2-cc1d4f10fb4e). __Subclassing__ means that a new object inherits its properties from a base or superclass object. Each learning objective will correspond to a __TODO__ in the [student lab notebook](../labs/custom_layers_and_models.ipynb) -- try to complete that notebook first before reviewing this solution notebook.

Setup
###Code
# Import necessary libraries
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
The `Layer` class: the combination of state (weights) and some computation

One of the central abstractions in Keras is the `Layer` class. A layer encapsulates both a state (the layer's "weights") and a transformation from inputs to outputs (a "call", the layer's forward pass).

Here's a densely-connected layer. It has a state: the variables `w` and `b`.
###Code
# Define a Linear class
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_dim, units), dtype="float32"),
trainable=True,
)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
You would use a layer by calling it on some tensor input(s), much like a Pythonfunction.
###Code
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
2021-09-14 13:30:06.886241: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
###Markdown
Note that the weights `w` and `b` are automatically tracked by the layer upon being set as layer attributes:
###Code
assert linear_layer.weights == [linear_layer.w, linear_layer.b]
###Output
_____no_output_____
###Markdown
Note you also have access to a quicker shortcut for adding weight to a layer: the `add_weight()` method:
###Code
# TODO
# Use `add_weight()` method for adding weight to a layer
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
tf.Tensor(
[[ 0.03132978 -0.00557932 0.02176719 -0.00405929]
[ 0.03132978 -0.00557932 0.02176719 -0.00405929]], shape=(2, 4), dtype=float32)
###Markdown
Layers can have non-trainable weights

Besides trainable weights, you can add non-trainable weights to a layer as well. Such weights are meant not to be taken into account during backpropagation, when you are training the layer.

Here's how to add and use a non-trainable weight:
###Code
# Add and use a non-trainable weight
class ComputeSum(keras.layers.Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
x = tf.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())
###Output
[2. 2.]
[4. 4.]
###Markdown
It's part of `layer.weights`, but it gets categorized as a non-trainable weight:
###Code
print("weights:", len(my_sum.weights))
print("non-trainable weights:", len(my_sum.non_trainable_weights))
# It's not included in the trainable weights:
print("trainable_weights:", my_sum.trainable_weights)
###Output
weights: 1
non-trainable weights: 1
trainable_weights: []
###Markdown
Best practice: deferring weight creation until the shape of the inputs is known

Our `Linear` layer above took an `input_dim` argument that was used to compute the shape of the weights `w` and `b` in `__init__()`:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
In many cases, you may not know in advance the size of your inputs, and you would like to lazily create weights when that value becomes known, some time after instantiating the layer. In the Keras API, we recommend creating layer weights in the `build(self, input_shape)` method of your layer. Like this:
###Code
# TODO
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
The `__call__()` method of your layer will automatically run `build()` the first time it is called. You now have a layer that's lazy and thus easier to use:
###Code
# At instantiation, we don't know on what inputs this is going to get called
linear_layer = Linear(32)
# The layer's weights are created dynamically the first time the layer is called
y = linear_layer(x)
###Output
_____no_output_____
###Markdown
Layers are recursively composable

If you assign a Layer instance as an attribute of another Layer, the outer layer will start tracking the weights of the inner layer. We recommend creating such sublayers in the `__init__()` method (since the sublayers will typically have a `build` method, they will be built when the outer layer gets built).
###Code
# TODO
# Let's assume we are reusing the Linear class
# with a `build` method that we defined above.
class MLPBlock(keras.layers.Layer):
def __init__(self):
super(MLPBlock, self).__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(1)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.linear_2(x)
x = tf.nn.relu(x)
return self.linear_3(x)
mlp = MLPBlock()
y = mlp(tf.ones(shape=(3, 64))) # The first call to the `mlp` will create the weights
print("weights:", len(mlp.weights))
print("trainable weights:", len(mlp.trainable_weights))
###Output
weights: 6
trainable weights: 6
###Markdown
The `add_loss()` method

When writing the `call()` method of a layer, you can create loss tensors that you will want to use later, when writing your training loop. This is doable by calling `self.add_loss(value)`:
###Code
# A layer that creates an activity regularization loss
class ActivityRegularizationLayer(keras.layers.Layer):
def __init__(self, rate=1e-2):
super(ActivityRegularizationLayer, self).__init__()
self.rate = rate
def call(self, inputs):
self.add_loss(self.rate * tf.reduce_sum(inputs))
return inputs
###Output
_____no_output_____
###Markdown
These losses (including those created by any inner layer) can be retrieved via `layer.losses`. This property is reset at the start of every `__call__()` to the top-level layer, so that `layer.losses` always contains the loss values created during the last forward pass.
###Code
# TODO
class OuterLayer(keras.layers.Layer):
def __init__(self):
super(OuterLayer, self).__init__()
self.activity_reg = ActivityRegularizationLayer(1e-2)
def call(self, inputs):
return self.activity_reg(inputs)
layer = OuterLayer()
assert len(layer.losses) == 0 # No losses yet since the layer has never been called
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1 # We created one loss value
# `layer.losses` gets reset at the start of each __call__
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1 # This is the loss created during the call above
###Output
_____no_output_____
###Markdown
In addition, the `losses` property also contains regularization losses created for the weights of any inner layer:
###Code
class OuterLayerWithKernelRegularizer(keras.layers.Layer):
def __init__(self):
super(OuterLayerWithKernelRegularizer, self).__init__()
self.dense = keras.layers.Dense(
32, kernel_regularizer=tf.keras.regularizers.l2(1e-3)
)
def call(self, inputs):
return self.dense(inputs)
layer = OuterLayerWithKernelRegularizer()
_ = layer(tf.zeros((1, 1)))
# This is `1e-3 * sum(layer.dense.kernel ** 2)`,
# created by the `kernel_regularizer` above.
print(layer.losses)
###Output
[<tf.Tensor: shape=(), dtype=float32, numpy=0.0020471246>]
###Markdown
These losses are meant to be taken into account when writing training loops, like this:

```python
# Instantiate an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Iterate over the batches of a dataset.
for x_batch_train, y_batch_train in train_dataset:
    with tf.GradientTape() as tape:
        logits = layer(x_batch_train)  # Logits for this minibatch
        # Loss value for this minibatch
        loss_value = loss_fn(y_batch_train, logits)
        # Add extra losses created during this forward pass:
        loss_value += sum(model.losses)

    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
```

For a detailed guide about writing training loops, see the [guide to writing a training loop from scratch](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch/). These losses also work seamlessly with `fit()` (they get automatically summed and added to the main loss, if any):
###Code
import numpy as np
inputs = keras.Input(shape=(3,))
outputs = ActivityRegularizationLayer()(inputs)
model = keras.Model(inputs, outputs)
# If there is a loss passed in `compile`, the regularization
# losses get added to it
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# It's also possible not to pass any loss in `compile`,
# since the model already has a loss to minimize, via the `add_loss`
# call during the forward pass!
model.compile(optimizer="adam")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
###Output
2021-09-14 13:30:07.699809: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)
###Markdown
The `add_metric()` method

Similarly to `add_loss()`, layers also have an `add_metric()` method for tracking the moving average of a quantity during training.

Consider the following layer: a "logistic endpoint" layer. It takes predictions & targets as inputs, it computes a loss which it tracks via `add_loss()`, and it computes an accuracy scalar, which it tracks via `add_metric()`.
###Code
# TODO
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super(LogisticEndpoint, self).__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_fn = keras.metrics.BinaryAccuracy()
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Log accuracy as a metric and add it
# to the layer using `self.add_metric()`.
acc = self.accuracy_fn(targets, logits, sample_weights)
self.add_metric(acc, name="accuracy")
# Return the inference-time prediction tensor (for `.predict()`).
return tf.nn.softmax(logits)
###Output
_____no_output_____
###Markdown
Metrics tracked in this way are accessible via `layer.metrics`:
###Code
layer = LogisticEndpoint()
targets = tf.ones((2, 2))
logits = tf.ones((2, 2))
y = layer(targets, logits)
print("layer.metrics:", layer.metrics)
print("current accuracy value:", float(layer.metrics[0].result()))
###Output
layer.metrics: [<keras.metrics.BinaryAccuracy object at 0x7eff0931ac10>]
current accuracy value: 1.0
###Markdown
Just like for `add_loss()`, these metrics are tracked by `fit()`:
###Code
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
predictions = LogisticEndpoint(name="predictions")(logits, targets)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam")
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
###Output
1/1 [==============================] - 0s 338ms/step - loss: 1.1031 - binary_accuracy: 0.0000e+00
###Markdown
You can optionally enable serialization on your layers

If you need your custom layers to be serializable as part of a [Functional model](https://www.tensorflow.org/guide/keras/functional/), you can optionally implement a `get_config()` method:
###Code
# TODO
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
# You can enable serialization on your layers using `get_config()` method
# Now you can recreate the layer from its config:
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'units': 64}
###Markdown
Note that the `__init__()` method of the base `Layer` class takes some keyword arguments, in particular a `name` and a `dtype`. It's good practice to pass these arguments to the parent class in `__init__()` and to include them in the layer config:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, **kwargs):
super(Linear, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
config = super(Linear, self).get_config()
config.update({"units": self.units})
return config
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'name': 'linear_8', 'trainable': True, 'dtype': 'float32', 'units': 64}
###Markdown
If you need more flexibility when deserializing the layer from its config, you can also override the `from_config()` class method. This is the base implementation of `from_config()`:

```python
def from_config(cls, config):
    return cls(**config)
```

To learn more about serialization and saving, see the complete [guide to saving and serializing models](https://www.tensorflow.org/guide/keras/save_and_serialize/).

Privileged `training` argument in the `call()` method

Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call()` method. By exposing this argument in `call()`, you enable the built-in training and evaluation loops (e.g. `fit()`) to correctly use the layer in training and inference.
###Code
class CustomDropout(keras.layers.Layer):
def __init__(self, rate, **kwargs):
super(CustomDropout, self).__init__(**kwargs)
self.rate = rate
def call(self, inputs, training=None):
if training:
return tf.nn.dropout(inputs, rate=self.rate)
return inputs
###Output
_____no_output_____
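For example, here is a minimal sketch of how the two modes behave (reusing the `CustomDropout` layer defined above):

```python
layer = CustomDropout(0.5)
x = tf.ones((2, 4))

# Training mode: roughly half the entries are zeroed out,
# and the survivors are rescaled by 1 / (1 - rate).
print(layer(x, training=True))

# Inference mode: the inputs pass through unchanged.
print(layer(x, training=False))
```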
###Markdown
Privileged `mask` argument in the `call()` method

The other privileged argument supported by `call()` is the `mask` argument. You will find it in all Keras RNN layers. A mask is a boolean tensor (one boolean value per timestep in the input) used to skip certain input timesteps when processing timeseries data. Keras will automatically pass the correct `mask` argument to `__call__()` for layers that support it, when a mask is generated by a prior layer. Mask-generating layers are the `Embedding` layer configured with `mask_zero=True`, and the `Masking` layer. To learn more about masking and how to write masking-enabled layers, please check out the guide ["understanding padding and masking"](https://www.tensorflow.org/guide/keras/masking_and_padding/).

The `Model` class

In general, you will use the `Layer` class to define inner computation blocks, and will use the `Model` class to define the outer model -- the object you will train. For instance, in a ResNet50 model, you would have several ResNet blocks subclassing `Layer`, and a single `Model` encompassing the entire ResNet50 network.

The `Model` class has the same API as `Layer`, with the following differences:
- It exposes built-in training, evaluation, and prediction loops (`model.fit()`, `model.evaluate()`, `model.predict()`).
- It exposes the list of its inner layers, via the `model.layers` property.
- It exposes saving and serialization APIs (`save()`, `save_weights()`...)

Effectively, the `Layer` class corresponds to what we refer to in the literature as a "layer" (as in "convolution layer" or "recurrent layer") or as a "block" (as in "ResNet block" or "Inception block"). Meanwhile, the `Model` class corresponds to what is referred to in the literature as a "model" (as in "deep learning model") or as a "network" (as in "deep neural network").

So if you're wondering, "should I use the `Layer` class or the `Model` class?", ask yourself: will I need to call `fit()` on it? Will I need to call `save()` on it? If so, go with `Model`. If not (either because your class is just a block in a bigger system, or because you are writing training & saving code yourself), use `Layer`.

For instance, we could take our mini-resnet example above, and use it to build a `Model` that we could train with `fit()`, and that we could save with `save_weights()`:

```python
class ResNet(tf.keras.Model):

    def __init__(self, num_classes=1000):
        super(ResNet, self).__init__()
        self.block_1 = ResNetBlock()
        self.block_2 = ResNetBlock()
        self.global_pool = layers.GlobalAveragePooling2D()
        self.classifier = Dense(num_classes)

    def call(self, inputs):
        x = self.block_1(inputs)
        x = self.block_2(x)
        x = self.global_pool(x)
        return self.classifier(x)

resnet = ResNet()
dataset = ...
resnet.fit(dataset, epochs=10)
resnet.save(filepath)
```

Putting it all together: an end-to-end example

Here's what you've learned so far:
- A `Layer` encapsulates a state (created in `__init__()` or `build()`) and some computation (defined in `call()`).
- Layers can be recursively nested to create new, bigger computation blocks.
- Layers can create and track losses (typically regularization losses) as well as metrics, via `add_loss()` and `add_metric()`.
- The outer container, the thing you want to train, is a `Model`. A `Model` is just like a `Layer`, but with added training and serialization utilities.

Let's put all of these things together into an end-to-end example: we're going to implement a Variational AutoEncoder (VAE). We'll train it on MNIST digits. Our VAE will be a subclass of `Model`, built as a nested composition of layers that subclass `Layer`.
It will feature a regularization loss (KL divergence).
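The `Sampling` layer below implements the reparameterization trick: it draws `z = z_mean + exp(0.5 * z_log_var) * epsilon` with `epsilon ~ N(0, I)`, which keeps the sampling step differentiable with respect to `z_mean` and `z_log_var`.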
###Code
from tensorflow.keras import layers
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
"""Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
def __init__(self, latent_dim=32, intermediate_dim=64, name="encoder", **kwargs):
super(Encoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_mean = layers.Dense(latent_dim)
self.dense_log_var = layers.Dense(latent_dim)
self.sampling = Sampling()
def call(self, inputs):
x = self.dense_proj(inputs)
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z
class Decoder(layers.Layer):
"""Converts z, the encoded digit vector, back into a readable digit."""
def __init__(self, original_dim, intermediate_dim=64, name="decoder", **kwargs):
super(Decoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_output = layers.Dense(original_dim, activation="sigmoid")
def call(self, inputs):
x = self.dense_proj(inputs)
return self.dense_output(x)
class VariationalAutoEncoder(keras.Model):
"""Combines the encoder and decoder into an end-to-end model for training."""
def __init__(
self,
original_dim,
intermediate_dim=64,
latent_dim=32,
name="autoencoder",
**kwargs
):
super(VariationalAutoEncoder, self).__init__(name=name, **kwargs)
self.original_dim = original_dim
self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim)
self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)
def call(self, inputs):
z_mean, z_log_var, z = self.encoder(inputs)
reconstructed = self.decoder(z)
# Add KL divergence regularization loss.
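        # Closed form for KL(N(z_mean, exp(z_log_var)) || N(0, I)), averaged
        # (rather than summed) over the batch and latent dimensions.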
kl_loss = -0.5 * tf.reduce_mean(
z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1
)
self.add_loss(kl_loss)
return reconstructed
###Output
_____no_output_____
###Markdown
Let's write a simple training loop on MNIST:
###Code
original_dim = 784
vae = VariationalAutoEncoder(original_dim, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
epochs = 2
# Iterate over epochs.
for epoch in range(epochs):
print("Start of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, x_batch_train in enumerate(train_dataset):
with tf.GradientTape() as tape:
reconstructed = vae(x_batch_train)
# Compute reconstruction loss
loss = mse_loss_fn(x_batch_train, reconstructed)
loss += sum(vae.losses) # Add KLD regularization loss
grads = tape.gradient(loss, vae.trainable_weights)
optimizer.apply_gradients(zip(grads, vae.trainable_weights))
loss_metric(loss)
if step % 100 == 0:
print("step %d: mean loss = %.4f" % (step, loss_metric.result()))
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
11501568/11490434 [==============================] - 0s 0us/step
Start of epoch 0
step 0: mean loss = 0.3305
step 100: mean loss = 0.1253
step 200: mean loss = 0.0991
step 300: mean loss = 0.0891
step 400: mean loss = 0.0842
step 500: mean loss = 0.0808
step 600: mean loss = 0.0787
step 700: mean loss = 0.0771
step 800: mean loss = 0.0759
step 900: mean loss = 0.0749
Start of epoch 1
step 0: mean loss = 0.0746
step 100: mean loss = 0.0740
step 200: mean loss = 0.0735
step 300: mean loss = 0.0730
step 400: mean loss = 0.0727
step 500: mean loss = 0.0723
step 600: mean loss = 0.0720
step 700: mean loss = 0.0717
step 800: mean loss = 0.0715
step 900: mean loss = 0.0712
###Markdown
Note that since the VAE is subclassing `Model`, it features built-in training loops. So you could also have trained it like this:
###Code
vae = VariationalAutoEncoder(784, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=2, batch_size=64)
###Output
Epoch 1/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0748
Epoch 2/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
###Markdown
Beyond object-oriented development: the Functional API

Was this example too much object-oriented development for you? You can also build models using the [Functional API](https://www.tensorflow.org/guide/keras/functional/). Importantly, choosing one style or another does not prevent you from leveraging components written in the other style: you can always mix-and-match.

For instance, the Functional API example below reuses the same `Sampling` layer we defined in the example above:
###Code
original_dim = 784
intermediate_dim = 64
latent_dim = 32
# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name="encoder_input")
x = layers.Dense(intermediate_dim, activation="relu")(original_inputs)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name="encoder")
# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name="z_sampling")
x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
outputs = layers.Dense(original_dim, activation="sigmoid")(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name="decoder")
# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name="vae")
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)
# Train.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
###Output
Epoch 1/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0746
Epoch 2/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
Epoch 3/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
###Markdown
Making New Layers and Models via Subclassing Learning Objectives* Use Layer class as the combination of state (weights) and computation.* Defer weight creation until the shape of the inputs is known.* Build recursively composable layers.* Compute loss using add_loss() method.* Compute average using add_metric() method.* Enable serialization on layers. IntroductionThis tutorial shows how to build new layers and models via [subclassing](https://towardsdatascience.com/model-sub-classing-and-custom-training-loop-from-scratch-in-tensorflow-2-cc1d4f10fb4e).__Subclassing__ is a term that refers inheriting properties for a new object from a base or superclass object.Each learning objective will correspond to a __TODO__ in the [student lab notebook](../labs/custom_layers_and_models.ipynb) -- try to complete that notebook first before reviewing this solution notebook. Setup
###Code
# Import necessary libraries
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
The `Layer` class: the combination of state (weights) and some computationOne of the central abstraction in Keras is the `Layer` class. A layerencapsulates both a state (the layer's "weights") and a transformation frominputs to outputs (a "call", the layer's forward pass).Here's a densely-connected layer. It has a state: the variables `w` and `b`.
###Code
# Define a Linear class
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_dim, units), dtype="float32"),
trainable=True,
)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
You would use a layer by calling it on some tensor input(s), much like a Pythonfunction.
###Code
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
2021-09-14 13:30:06.886241: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
###Markdown
Note that the weights `w` and `b` are automatically tracked by the layer uponbeing set as layer attributes:
###Code
assert linear_layer.weights == [linear_layer.w, linear_layer.b]
###Output
_____no_output_____
###Markdown
Note you also have access to a quicker shortcut for adding weight to a layer:the `add_weight()` method:
###Code
# TODO
# Use `add_weight()` method for adding weight to a layer
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
tf.Tensor(
[[ 0.03132978 -0.00557932 0.02176719 -0.00405929]
[ 0.03132978 -0.00557932 0.02176719 -0.00405929]], shape=(2, 4), dtype=float32)
###Markdown
Layers can have non-trainable weightsBesides trainable weights, you can add non-trainable weights to a layer aswell. Such weights are meant not to be taken into account duringbackpropagation, when you are training the layer.Here's how to add and use a non-trainable weight:
###Code
# Add and use a non-trainable weight
class ComputeSum(keras.layers.Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
x = tf.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())
###Output
[2. 2.]
[4. 4.]
###Markdown
It's part of `layer.weights`, but it gets categorized as a non-trainable weight:
###Code
print("weights:", len(my_sum.weights))
print("non-trainable weights:", len(my_sum.non_trainable_weights))
# It's not included in the trainable weights:
print("trainable_weights:", my_sum.trainable_weights)
###Output
weights: 1
non-trainable weights: 1
trainable_weights: []
###Markdown
Best practice: deferring weight creation until the shape of the inputs is knownOur `Linear` layer above took an `input_dim` argument that was used to computethe shape of the weights `w` and `b` in `__init__()`:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
In many cases, you may not know in advance the size of your inputs, and youwould like to lazily create weights when that value becomes known, some timeafter instantiating the layer.In the Keras API, we recommend creating layer weights in the `build(self, input_shape)` method of your layer. Like this:
###Code
# TODO
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
The `__call__()` method of your layer will automatically run build the first timeit is called. You now have a layer that's lazy and thus easier to use:
###Code
# At instantiation, we don't know on what inputs this is going to get called
linear_layer = Linear(32)
# The layer's weights are created dynamically the first time the layer is called
y = linear_layer(x)
###Output
_____no_output_____
###Markdown
Layers are recursively composableIf you assign a Layer instance as an attribute of another Layer, the outer layerwill start tracking the weights of the inner layer.We recommend creating such sublayers in the `__init__()` method (since thesublayers will typically have a build method, they will be built when theouter layer gets built).
###Code
# TODO
# Let's assume we are reusing the Linear class
# with a `build` method that we defined above.
class MLPBlock(keras.layers.Layer):
def __init__(self):
super(MLPBlock, self).__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(1)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.linear_2(x)
x = tf.nn.relu(x)
return self.linear_3(x)
mlp = MLPBlock()
y = mlp(tf.ones(shape=(3, 64))) # The first call to the `mlp` will create the weights
print("weights:", len(mlp.weights))
print("trainable weights:", len(mlp.trainable_weights))
###Output
weights: 6
trainable weights: 6
###Markdown
The `add_loss()` methodWhen writing the `call()` method of a layer, you can create loss tensors thatyou will want to use later, when writing your training loop. This is doable bycalling `self.add_loss(value)`:
###Code
# A layer that creates an activity regularization loss
class ActivityRegularizationLayer(keras.layers.Layer):
def __init__(self, rate=1e-2):
super(ActivityRegularizationLayer, self).__init__()
self.rate = rate
def call(self, inputs):
self.add_loss(self.rate * tf.reduce_sum(inputs))
return inputs
###Output
_____no_output_____
###Markdown
These losses (including those created by any inner layer) can be retrieved via`layer.losses`. This property is reset at the start of every `__call__()` tothe top-level layer, so that `layer.losses` always contains the loss valuescreated during the last forward pass.
###Code
# TODO
class OuterLayer(keras.layers.Layer):
def __init__(self):
super(OuterLayer, self).__init__()
self.activity_reg = ActivityRegularizationLayer(1e-2)
def call(self, inputs):
return self.activity_reg(inputs)
layer = OuterLayer()
assert len(layer.losses) == 0 # No losses yet since the layer has never been called
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1 # We created one loss value
# `layer.losses` gets reset at the start of each __call__
_ = layer(tf.zeros(1, 1))
assert len(layer.losses) == 1 # This is the loss created during the call above
###Output
_____no_output_____
###Markdown
In addition, the `loss` property also contains regularization losses createdfor the weights of any inner layer:
###Code
class OuterLayerWithKernelRegularizer(keras.layers.Layer):
def __init__(self):
super(OuterLayerWithKernelRegularizer, self).__init__()
self.dense = keras.layers.Dense(
32, kernel_regularizer=tf.keras.regularizers.l2(1e-3)
)
def call(self, inputs):
return self.dense(inputs)
layer = OuterLayerWithKernelRegularizer()
_ = layer(tf.zeros((1, 1)))
# This is `1e-3 * sum(layer.dense.kernel ** 2)`,
# created by the `kernel_regularizer` above.
print(layer.losses)
###Output
[<tf.Tensor: shape=(), dtype=float32, numpy=0.0020471246>]
###Markdown
These losses are meant to be taken into account when writing training loops, like this:

```python
# Instantiate an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Iterate over the batches of a dataset.
for x_batch_train, y_batch_train in train_dataset:
    with tf.GradientTape() as tape:
        logits = layer(x_batch_train)  # Logits for this minibatch
        # Loss value for this minibatch
        loss_value = loss_fn(y_batch_train, logits)
        # Add extra losses created during this forward pass:
        loss_value += sum(model.losses)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
```

For a detailed guide about writing training loops, see the [guide to writing a training loop from scratch](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch/).

These losses also work seamlessly with `fit()` (they get automatically summed and added to the main loss, if any):
###Code
import numpy as np
inputs = keras.Input(shape=(3,))
outputs = ActivityRegularizationLayer()(inputs)
model = keras.Model(inputs, outputs)
# If there is a loss passed in `compile`, the regularization
# losses get added to it
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# It's also possible not to pass any loss in `compile`,
# since the model already has a loss to minimize, via the `add_loss`
# call during the forward pass!
model.compile(optimizer="adam")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
###Output
2021-09-14 13:30:07.699809: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)
###Markdown
The `add_metric()` method
Similarly to `add_loss()`, layers also have an `add_metric()` method for tracking the moving average of a quantity during training.

Consider the following layer: a "logistic endpoint" layer. It takes as inputs predictions & targets, it computes a loss which it tracks via `add_loss()`, and it computes an accuracy scalar, which it tracks via `add_metric()`.
###Code
# TODO
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super(LogisticEndpoint, self).__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_fn = keras.metrics.BinaryAccuracy()
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Log accuracy as a metric and add it
# to the layer using `self.add_metric()`.
acc = self.accuracy_fn(targets, logits, sample_weights)
self.add_metric(acc, name="accuracy")
# Return the inference-time prediction tensor (for `.predict()`).
return tf.nn.softmax(logits)
###Output
_____no_output_____
###Markdown
Metrics tracked in this way are accessible via `layer.metrics`:
###Code
layer = LogisticEndpoint()
targets = tf.ones((2, 2))
logits = tf.ones((2, 2))
y = layer(targets, logits)
print("layer.metrics:", layer.metrics)
print("current accuracy value:", float(layer.metrics[0].result()))
###Output
layer.metrics: [<keras.metrics.BinaryAccuracy object at 0x7eff0931ac10>]
current accuracy value: 1.0
###Markdown
Just like for `add_loss()`, these metrics are tracked by `fit()`:
###Code
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
# `LogisticEndpoint.call()` expects (targets, logits), in that order.
predictions = LogisticEndpoint(name="predictions")(targets, logits)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam")
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
###Output
1/1 [==============================] - 0s 338ms/step - loss: 1.1031 - binary_accuracy: 0.0000e+00
###Markdown
You can optionally enable serialization on your layers
If you need your custom layers to be serializable as part of a [Functional model](https://www.tensorflow.org/guide/keras/functional/), you can optionally implement a `get_config()` method:
###Code
# TODO
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
# You can enable serialization on your layers using the `get_config()` method.
# Now you can recreate the layer from its config:
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'units': 64}
###Markdown
Note that the `__init__()` method of the base `Layer` class takes some keyword arguments, in particular a `name` and a `dtype`. It's good practice to pass these arguments to the parent class in `__init__()` and to include them in the layer config:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, **kwargs):
super(Linear, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
config = super(Linear, self).get_config()
config.update({"units": self.units})
return config
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'name': 'linear_8', 'trainable': True, 'dtype': 'float32', 'units': 64}
###Markdown
If you need more flexibility when deserializing the layer from its config, you can also override the `from_config()` class method. This is the base implementation of `from_config()`:

```python
def from_config(cls, config):
    return cls(**config)
```

To learn more about serialization and saving, see the complete [guide to saving and serializing models](https://www.tensorflow.org/guide/keras/save_and_serialize/). (A minimal sketch of overriding `from_config()` appears right after the dropout example below.)

Privileged `training` argument in the `call()` method
Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call()` method.

By exposing this argument in `call()`, you enable the built-in training and evaluation loops (e.g. `fit()`) to correctly use the layer in training and inference.
###Code
class CustomDropout(keras.layers.Layer):
def __init__(self, rate, **kwargs):
super(CustomDropout, self).__init__(**kwargs)
self.rate = rate
def call(self, inputs, training=None):
if training:
return tf.nn.dropout(inputs, rate=self.rate)
return inputs
###Output
_____no_output_____
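###Markdown
As promised above, here is a minimal sketch of overriding `from_config()` (an illustrative addition, not from the original guide). It is only useful when a config needs massaging before the constructor runs; the `units_per_head` key below is invented purely for the example.
###Code
# Hypothetical sketch: translate a made-up legacy config key before instantiating.
class LinearV2(Linear):
    @classmethod
    def from_config(cls, config):
        if "units_per_head" in config:
            # Pretend older configs stored units per head, with 4 heads.
            config["units"] = config.pop("units_per_head") * 4
        return cls(**config)
restored = LinearV2.from_config({"units_per_head": 8})  # equivalent to units=32
###Output
_____no_output_____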
###Markdown
Privileged `mask` argument in the `call()` method
The other privileged argument supported by `call()` is the `mask` argument.

You will find it in all Keras RNN layers. A mask is a boolean tensor (one boolean value per timestep in the input) used to skip certain input timesteps when processing timeseries data.

Keras will automatically pass the correct `mask` argument to `__call__()` for layers that support it, when a mask is generated by a prior layer. Mask-generating layers are the `Embedding` layer configured with `mask_zero=True`, and the `Masking` layer.

To learn more about masking and how to write masking-enabled layers, please check out the guide ["understanding padding and masking"](https://www.tensorflow.org/guide/keras/masking_and_padding/).

The `Model` class
In general, you will use the `Layer` class to define inner computation blocks, and will use the `Model` class to define the outer model -- the object you will train.

For instance, in a ResNet50 model, you would have several ResNet blocks subclassing `Layer`, and a single `Model` encompassing the entire ResNet50 network.

The `Model` class has the same API as `Layer`, with the following differences:

- It exposes built-in training, evaluation, and prediction loops (`model.fit()`, `model.evaluate()`, `model.predict()`).
- It exposes the list of its inner layers, via the `model.layers` property.
- It exposes saving and serialization APIs (`save()`, `save_weights()`...)

Effectively, the `Layer` class corresponds to what we refer to in the literature as a "layer" (as in "convolution layer" or "recurrent layer") or as a "block" (as in "ResNet block" or "Inception block").

Meanwhile, the `Model` class corresponds to what is referred to in the literature as a "model" (as in "deep learning model") or as a "network" (as in "deep neural network").

So if you're wondering, "should I use the `Layer` class or the `Model` class?", ask yourself: will I need to call `fit()` on it? Will I need to call `save()` on it? If so, go with `Model`. If not (either because your class is just a block in a bigger system, or because you are writing training & saving code yourself), use `Layer`.

For instance, we could take our mini-resnet example above, and use it to build a `Model` that we could train with `fit()`, and that we could save with `save_weights()`:

```python
class ResNet(tf.keras.Model):

    def __init__(self, num_classes=1000):
        super(ResNet, self).__init__()
        self.block_1 = ResNetBlock()
        self.block_2 = ResNetBlock()
        self.global_pool = layers.GlobalAveragePooling2D()
        self.classifier = Dense(num_classes)

    def call(self, inputs):
        x = self.block_1(inputs)
        x = self.block_2(x)
        x = self.global_pool(x)
        return self.classifier(x)


resnet = ResNet()
dataset = ...
resnet.fit(dataset, epochs=10)
resnet.save(filepath)
```

Putting it all together: an end-to-end example
Here's what you've learned so far:

- A `Layer` encapsulates a state (created in `__init__()` or `build()`) and some computation (defined in `call()`).
- Layers can be recursively nested to create new, bigger computation blocks.
- Layers can create and track losses (typically regularization losses) as well as metrics, via `add_loss()` and `add_metric()`.
- The outer container, the thing you want to train, is a `Model`. A `Model` is just like a `Layer`, but with added training and serialization utilities.

Let's put all of these things together into an end-to-end example: we're going to implement a Variational AutoEncoder (VAE). We'll train it on MNIST digits.

Our VAE will be a subclass of `Model`, built as a nested composition of layers that subclass `Layer`.
It will feature a regularization loss (KL divergence).
###Code
from tensorflow.keras import layers
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
"""Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
def __init__(self, latent_dim=32, intermediate_dim=64, name="encoder", **kwargs):
super(Encoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_mean = layers.Dense(latent_dim)
self.dense_log_var = layers.Dense(latent_dim)
self.sampling = Sampling()
def call(self, inputs):
x = self.dense_proj(inputs)
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z
class Decoder(layers.Layer):
"""Converts z, the encoded digit vector, back into a readable digit."""
def __init__(self, original_dim, intermediate_dim=64, name="decoder", **kwargs):
super(Decoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_output = layers.Dense(original_dim, activation="sigmoid")
def call(self, inputs):
x = self.dense_proj(inputs)
return self.dense_output(x)
class VariationalAutoEncoder(keras.Model):
"""Combines the encoder and decoder into an end-to-end model for training."""
def __init__(
self,
original_dim,
intermediate_dim=64,
latent_dim=32,
name="autoencoder",
**kwargs
):
super(VariationalAutoEncoder, self).__init__(name=name, **kwargs)
self.original_dim = original_dim
self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim)
self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)
def call(self, inputs):
z_mean, z_log_var, z = self.encoder(inputs)
reconstructed = self.decoder(z)
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(
z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1
)
self.add_loss(kl_loss)
return reconstructed
###Output
_____no_output_____
###Markdown
Let's write a simple training loop on MNIST:
###Code
original_dim = 784
vae = VariationalAutoEncoder(original_dim, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
epochs = 2
# Iterate over epochs.
for epoch in range(epochs):
print("Start of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, x_batch_train in enumerate(train_dataset):
with tf.GradientTape() as tape:
reconstructed = vae(x_batch_train)
# Compute reconstruction loss
loss = mse_loss_fn(x_batch_train, reconstructed)
loss += sum(vae.losses) # Add KLD regularization loss
grads = tape.gradient(loss, vae.trainable_weights)
optimizer.apply_gradients(zip(grads, vae.trainable_weights))
loss_metric(loss)
if step % 100 == 0:
print("step %d: mean loss = %.4f" % (step, loss_metric.result()))
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
11501568/11490434 [==============================] - 0s 0us/step
Start of epoch 0
step 0: mean loss = 0.3305
step 100: mean loss = 0.1253
step 200: mean loss = 0.0991
step 300: mean loss = 0.0891
step 400: mean loss = 0.0842
step 500: mean loss = 0.0808
step 600: mean loss = 0.0787
step 700: mean loss = 0.0771
step 800: mean loss = 0.0759
step 900: mean loss = 0.0749
Start of epoch 1
step 0: mean loss = 0.0746
step 100: mean loss = 0.0740
step 200: mean loss = 0.0735
step 300: mean loss = 0.0730
step 400: mean loss = 0.0727
step 500: mean loss = 0.0723
step 600: mean loss = 0.0720
step 700: mean loss = 0.0717
step 800: mean loss = 0.0715
step 900: mean loss = 0.0712
###Markdown
Note that since the VAE is subclassing `Model`, it features built-in training loops. So you could also have trained it like this:
###Code
vae = VariationalAutoEncoder(784, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=2, batch_size=64)
###Output
Epoch 1/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0748
Epoch 2/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
###Markdown
Beyond object-oriented development: the Functional API
Was this example too much object-oriented development for you? You can also build models using the [Functional API](https://www.tensorflow.org/guide/keras/functional/). Importantly, choosing one style or another does not prevent you from leveraging components written in the other style: you can always mix-and-match.

For instance, the Functional API example below reuses the same `Sampling` layer we defined in the example above:
###Code
original_dim = 784
intermediate_dim = 64
latent_dim = 32
# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name="encoder_input")
x = layers.Dense(intermediate_dim, activation="relu")(original_inputs)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name="encoder")
# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name="z_sampling")
x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
outputs = layers.Dense(original_dim, activation="sigmoid")(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name="decoder")
# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name="vae")
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)
# Train.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
###Output
Epoch 1/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0746
Epoch 2/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
Epoch 3/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
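###Markdown
Since the functional `vae` above is a regular `Model`, its weights can also be checkpointed and restored. A minimal sketch (an illustrative addition; the `/tmp/vae_ckpt` path is arbitrary):
###Code
# Save the trained weights, then restore them into the same model.
vae.save_weights("/tmp/vae_ckpt")
vae.load_weights("/tmp/vae_ckpt")
###Output
_____no_output_____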
###Markdown
Making new Layers and Models via subclassing

Learning Objectives
* Layer class: the combination of state (weights) and computation.
* Defer weight creation until the shape of the inputs is known.
* Layers are recursively composable.
* Compute loss using the `add_loss()` method.
* Compute average using the `add_metric()` method.
* Enable serialization on layers.

Introduction
This tutorial shows how to build new layers and models via [subclassing](https://towardsdatascience.com/model-sub-classing-and-custom-training-loop-from-scratch-in-tensorflow-2-cc1d4f10fb4e). __Subclassing__ is a term that refers to inheriting properties for a new object from a base or superclass object.

Each learning objective will correspond to a __TODO__ in the [student lab notebook](../labs/custom_layers_and_models.ipynb) -- try to complete that notebook first before reviewing this solution notebook.

Setup
###Code
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
The `Layer` class: the combination of state (weights) and some computation
One of the central abstractions in Keras is the `Layer` class. A layer encapsulates both a state (the layer's "weights") and a transformation from inputs to outputs (a "call", the layer's forward pass).

Here's a densely-connected layer. It has a state: the variables `w` and `b`.
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_dim, units), dtype="float32"),
trainable=True,
)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
You would use a layer by calling it on some tensor input(s), much like a Python function.
###Code
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
2021-09-14 13:30:06.886241: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
###Markdown
Note that the weights `w` and `b` are automatically tracked by the layer upon being set as layer attributes:
###Code
assert linear_layer.weights == [linear_layer.w, linear_layer.b]
###Output
_____no_output_____
###Markdown
Note you also have access to a quicker shortcut for adding weight to a layer: the `add_weight()` method:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
tf.Tensor(
[[ 0.03132978 -0.00557932 0.02176719 -0.00405929]
[ 0.03132978 -0.00557932 0.02176719 -0.00405929]], shape=(2, 4), dtype=float32)
###Markdown
Layers can have non-trainable weights
Besides trainable weights, you can add non-trainable weights to a layer as well. Such weights are meant not to be taken into account during backpropagation, when you are training the layer.

Here's how to add and use a non-trainable weight:
###Code
class ComputeSum(keras.layers.Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
x = tf.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())
###Output
[2. 2.]
[4. 4.]
###Markdown
It's part of `layer.weights`, but it gets categorized as a non-trainable weight:
###Code
print("weights:", len(my_sum.weights))
print("non-trainable weights:", len(my_sum.non_trainable_weights))
# It's not included in the trainable weights:
print("trainable_weights:", my_sum.trainable_weights)
###Output
weights: 1
non-trainable weights: 1
trainable_weights: []
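###Markdown
A related sketch (added for illustration): non-trainable weights can also be created with the `add_weight()` shortcut by passing `trainable=False`.
###Code
# The same running-sum counter as above, built with `add_weight(trainable=False)`.
class ComputeSum2(keras.layers.Layer):
    def __init__(self, input_dim):
        super(ComputeSum2, self).__init__()
        self.total = self.add_weight(
            shape=(input_dim,), initializer="zeros", trainable=False
        )
    def call(self, inputs):
        self.total.assign_add(tf.reduce_sum(inputs, axis=0))
        return self.total
print(ComputeSum2(2)(tf.ones((2, 2))).numpy())
###Output
_____no_output_____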
###Markdown
Best practice: deferring weight creation until the shape of the inputs is known
Our `Linear` layer above took an `input_dim` argument that was used to compute the shape of the weights `w` and `b` in `__init__()`:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
In many cases, you may not know in advance the size of your inputs, and you would like to lazily create weights when that value becomes known, some time after instantiating the layer.

In the Keras API, we recommend creating layer weights in the `build(self, input_shape)` method of your layer. Like this:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
The `__call__()` method of your layer will automatically run `build()` the first time it is called. You now have a layer that's lazy and thus easier to use:
###Code
# At instantiation, we don't know on what inputs this is going to get called
linear_layer = Linear(32)
# The layer's weights are created dynamically the first time the layer is called
y = linear_layer(x)
###Output
_____no_output_____
###Markdown
Layers are recursively composable
If you assign a Layer instance as an attribute of another Layer, the outer layer will start tracking the weights of the inner layer.

We recommend creating such sublayers in the `__init__()` method (since the sublayers will typically have a `build()` method, they will be built when the outer layer gets built).
###Code
# Let's assume we are reusing the Linear class
# with a `build` method that we defined above.
class MLPBlock(keras.layers.Layer):
def __init__(self):
super(MLPBlock, self).__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(1)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.linear_2(x)
x = tf.nn.relu(x)
return self.linear_3(x)
mlp = MLPBlock()
y = mlp(tf.ones(shape=(3, 64))) # The first call to the `mlp` will create the weights
print("weights:", len(mlp.weights))
print("trainable weights:", len(mlp.trainable_weights))
###Output
weights: 6
trainable weights: 6
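###Markdown
As an added check (not in the original guide), the sublayers' variables are reachable both through the sublayer attributes and through the outer layer's flat `weights` list:
###Code
# Each Linear sublayer contributes a kernel and a bias, and the very same
# variable objects appear in the outer layer's `weights`.
assert len(mlp.linear_1.weights) == 2
assert all(any(w is v for v in mlp.weights) for w in mlp.linear_1.weights)
###Output
_____no_output_____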
###Markdown
The `add_loss()` method
When writing the `call()` method of a layer, you can create loss tensors that you will want to use later, when writing your training loop. This is doable by calling `self.add_loss(value)`:
###Code
# A layer that creates an activity regularization loss
class ActivityRegularizationLayer(keras.layers.Layer):
def __init__(self, rate=1e-2):
super(ActivityRegularizationLayer, self).__init__()
self.rate = rate
def call(self, inputs):
self.add_loss(self.rate * tf.reduce_sum(inputs))
return inputs
###Output
_____no_output_____
###Markdown
These losses (including those created by any inner layer) can be retrieved via `layer.losses`. This property is reset at the start of every `__call__()` to the top-level layer, so that `layer.losses` always contains the loss values created during the last forward pass.
###Code
class OuterLayer(keras.layers.Layer):
def __init__(self):
super(OuterLayer, self).__init__()
self.activity_reg = ActivityRegularizationLayer(1e-2)
def call(self, inputs):
return self.activity_reg(inputs)
layer = OuterLayer()
assert len(layer.losses) == 0 # No losses yet since the layer has never been called
_ = layer(tf.zeros((1, 1)))
assert len(layer.losses) == 1 # We created one loss value
# `layer.losses` gets reset at the start of each __call__
_ = layer(tf.zeros((1, 1)))
assert len(layer.losses) == 1 # This is the loss created during the call above
###Output
_____no_output_____
###Markdown
In addition, the `losses` property also contains regularization losses created for the weights of any inner layer:
###Code
class OuterLayerWithKernelRegularizer(keras.layers.Layer):
def __init__(self):
super(OuterLayerWithKernelRegularizer, self).__init__()
self.dense = keras.layers.Dense(
32, kernel_regularizer=tf.keras.regularizers.l2(1e-3)
)
def call(self, inputs):
return self.dense(inputs)
layer = OuterLayerWithKernelRegularizer()
_ = layer(tf.zeros((1, 1)))
# This is `1e-3 * sum(layer.dense.kernel ** 2)`,
# created by the `kernel_regularizer` above.
print(layer.losses)
###Output
[<tf.Tensor: shape=(), dtype=float32, numpy=0.0020471246>]
###Markdown
These losses are meant to be taken into account when writing training loops, like this:

```python
# Instantiate an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Iterate over the batches of a dataset.
for x_batch_train, y_batch_train in train_dataset:
    with tf.GradientTape() as tape:
        logits = layer(x_batch_train)  # Logits for this minibatch
        # Loss value for this minibatch
        loss_value = loss_fn(y_batch_train, logits)
        # Add extra losses created during this forward pass:
        loss_value += sum(model.losses)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
```

For a detailed guide about writing training loops, see the [guide to writing a training loop from scratch](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch/).

These losses also work seamlessly with `fit()` (they get automatically summed and added to the main loss, if any):
###Code
import numpy as np
inputs = keras.Input(shape=(3,))
outputs = ActivityRegularizationLayer()(inputs)
model = keras.Model(inputs, outputs)
# If there is a loss passed in `compile`, the regularization
# losses get added to it
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# It's also possible not to pass any loss in `compile`,
# since the model already has a loss to minimize, via the `add_loss`
# call during the forward pass!
model.compile(optimizer="adam")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
###Output
2021-09-14 13:30:07.699809: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)
###Markdown
The `add_metric()` method
Similarly to `add_loss()`, layers also have an `add_metric()` method for tracking the moving average of a quantity during training.

Consider the following layer: a "logistic endpoint" layer. It takes as inputs predictions & targets, it computes a loss which it tracks via `add_loss()`, and it computes an accuracy scalar, which it tracks via `add_metric()`.
###Code
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super(LogisticEndpoint, self).__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_fn = keras.metrics.BinaryAccuracy()
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Log accuracy as a metric and add it
# to the layer using `self.add_metric()`.
acc = self.accuracy_fn(targets, logits, sample_weights)
self.add_metric(acc, name="accuracy")
# Return the inference-time prediction tensor (for `.predict()`).
return tf.nn.softmax(logits)
###Output
_____no_output_____
###Markdown
Metrics tracked in this way are accessible via `layer.metrics`:
###Code
layer = LogisticEndpoint()
targets = tf.ones((2, 2))
logits = tf.ones((2, 2))
y = layer(targets, logits)
print("layer.metrics:", layer.metrics)
print("current accuracy value:", float(layer.metrics[0].result()))
###Output
layer.metrics: [<keras.metrics.BinaryAccuracy object at 0x7eff0931ac10>]
current accuracy value: 1.0
###Markdown
Just like for `add_loss()`, these metrics are tracked by `fit()`:
###Code
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
# `LogisticEndpoint.call()` expects (targets, logits), in that order.
predictions = LogisticEndpoint(name="predictions")(targets, logits)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam")
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
###Output
1/1 [==============================] - 0s 338ms/step - loss: 1.1031 - binary_accuracy: 0.0000e+00
###Markdown
You can optionally enable serialization on your layers
If you need your custom layers to be serializable as part of a [Functional model](https://www.tensorflow.org/guide/keras/functional/), you can optionally implement a `get_config()` method:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
# Now you can recreate the layer from its config:
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'units': 64}
###Markdown
Note that the `__init__()` method of the base `Layer` class takes some keyword arguments, in particular a `name` and a `dtype`. It's good practice to pass these arguments to the parent class in `__init__()` and to include them in the layer config:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, **kwargs):
super(Linear, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
config = super(Linear, self).get_config()
config.update({"units": self.units})
return config
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'name': 'linear_8', 'trainable': True, 'dtype': 'float32', 'units': 64}
###Markdown
If you need more flexibility when deserializing the layer from its config, you can also override the `from_config()` class method. This is the base implementation of `from_config()`:

```python
def from_config(cls, config):
    return cls(**config)
```

To learn more about serialization and saving, see the complete [guide to saving and serializing models](https://www.tensorflow.org/guide/keras/save_and_serialize/).

Privileged `training` argument in the `call()` method
Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call()` method.

By exposing this argument in `call()`, you enable the built-in training and evaluation loops (e.g. `fit()`) to correctly use the layer in training and inference.
###Code
class CustomDropout(keras.layers.Layer):
def __init__(self, rate, **kwargs):
super(CustomDropout, self).__init__(**kwargs)
self.rate = rate
def call(self, inputs, training=None):
if training:
return tf.nn.dropout(inputs, rate=self.rate)
return inputs
###Output
_____no_output_____
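###Markdown
A short usage sketch (an illustrative addition): calling the layer directly with the `training` flag toggles the two code paths.
###Code
drop = CustomDropout(rate=0.5)
sample = tf.ones((2, 4))
print(drop(sample, training=True))   # entries randomly zeroed, survivors scaled by 1/(1-rate)
print(drop(sample, training=False))  # identity pass-through
###Output
_____no_output_____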
###Markdown
Privileged `mask` argument in the `call()` method
The other privileged argument supported by `call()` is the `mask` argument.

You will find it in all Keras RNN layers. A mask is a boolean tensor (one boolean value per timestep in the input) used to skip certain input timesteps when processing timeseries data.

Keras will automatically pass the correct `mask` argument to `__call__()` for layers that support it, when a mask is generated by a prior layer. Mask-generating layers are the `Embedding` layer configured with `mask_zero=True`, and the `Masking` layer. (A minimal mask-generation sketch appears at the end of this notebook.)

To learn more about masking and how to write masking-enabled layers, please check out the guide ["understanding padding and masking"](https://www.tensorflow.org/guide/keras/masking_and_padding/).

The `Model` class
In general, you will use the `Layer` class to define inner computation blocks, and will use the `Model` class to define the outer model -- the object you will train.

For instance, in a ResNet50 model, you would have several ResNet blocks subclassing `Layer`, and a single `Model` encompassing the entire ResNet50 network.

The `Model` class has the same API as `Layer`, with the following differences:

- It exposes built-in training, evaluation, and prediction loops (`model.fit()`, `model.evaluate()`, `model.predict()`).
- It exposes the list of its inner layers, via the `model.layers` property.
- It exposes saving and serialization APIs (`save()`, `save_weights()`...)

Effectively, the `Layer` class corresponds to what we refer to in the literature as a "layer" (as in "convolution layer" or "recurrent layer") or as a "block" (as in "ResNet block" or "Inception block").

Meanwhile, the `Model` class corresponds to what is referred to in the literature as a "model" (as in "deep learning model") or as a "network" (as in "deep neural network").

So if you're wondering, "should I use the `Layer` class or the `Model` class?", ask yourself: will I need to call `fit()` on it? Will I need to call `save()` on it? If so, go with `Model`. If not (either because your class is just a block in a bigger system, or because you are writing training & saving code yourself), use `Layer`.

For instance, we could take our mini-resnet example above, and use it to build a `Model` that we could train with `fit()`, and that we could save with `save_weights()`:

```python
class ResNet(tf.keras.Model):

    def __init__(self, num_classes=1000):
        super(ResNet, self).__init__()
        self.block_1 = ResNetBlock()
        self.block_2 = ResNetBlock()
        self.global_pool = layers.GlobalAveragePooling2D()
        self.classifier = Dense(num_classes)

    def call(self, inputs):
        x = self.block_1(inputs)
        x = self.block_2(x)
        x = self.global_pool(x)
        return self.classifier(x)


resnet = ResNet()
dataset = ...
resnet.fit(dataset, epochs=10)
resnet.save(filepath)
```

Putting it all together: an end-to-end example
Here's what you've learned so far:

- A `Layer` encapsulates a state (created in `__init__()` or `build()`) and some computation (defined in `call()`).
- Layers can be recursively nested to create new, bigger computation blocks.
- Layers can create and track losses (typically regularization losses) as well as metrics, via `add_loss()` and `add_metric()`.
- The outer container, the thing you want to train, is a `Model`. A `Model` is just like a `Layer`, but with added training and serialization utilities.

Let's put all of these things together into an end-to-end example: we're going to implement a Variational AutoEncoder (VAE). We'll train it on MNIST digits.

Our VAE will be a subclass of `Model`, built as a nested composition of layers that subclass `Layer`.
It will feature a regularization loss (KL divergence).
###Code
from tensorflow.keras import layers
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
"""Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
def __init__(self, latent_dim=32, intermediate_dim=64, name="encoder", **kwargs):
super(Encoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_mean = layers.Dense(latent_dim)
self.dense_log_var = layers.Dense(latent_dim)
self.sampling = Sampling()
def call(self, inputs):
x = self.dense_proj(inputs)
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z
class Decoder(layers.Layer):
"""Converts z, the encoded digit vector, back into a readable digit."""
def __init__(self, original_dim, intermediate_dim=64, name="decoder", **kwargs):
super(Decoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_output = layers.Dense(original_dim, activation="sigmoid")
def call(self, inputs):
x = self.dense_proj(inputs)
return self.dense_output(x)
class VariationalAutoEncoder(keras.Model):
"""Combines the encoder and decoder into an end-to-end model for training."""
def __init__(
self,
original_dim,
intermediate_dim=64,
latent_dim=32,
name="autoencoder",
**kwargs
):
super(VariationalAutoEncoder, self).__init__(name=name, **kwargs)
self.original_dim = original_dim
self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim)
self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)
def call(self, inputs):
z_mean, z_log_var, z = self.encoder(inputs)
reconstructed = self.decoder(z)
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(
z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1
)
self.add_loss(kl_loss)
return reconstructed
###Output
_____no_output_____
###Markdown
Let's write a simple training loop on MNIST:
###Code
original_dim = 784
vae = VariationalAutoEncoder(original_dim, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
epochs = 2
# Iterate over epochs.
for epoch in range(epochs):
print("Start of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, x_batch_train in enumerate(train_dataset):
with tf.GradientTape() as tape:
reconstructed = vae(x_batch_train)
# Compute reconstruction loss
loss = mse_loss_fn(x_batch_train, reconstructed)
loss += sum(vae.losses) # Add KLD regularization loss
grads = tape.gradient(loss, vae.trainable_weights)
optimizer.apply_gradients(zip(grads, vae.trainable_weights))
loss_metric(loss)
if step % 100 == 0:
print("step %d: mean loss = %.4f" % (step, loss_metric.result()))
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
11501568/11490434 [==============================] - 0s 0us/step
Start of epoch 0
step 0: mean loss = 0.3305
step 100: mean loss = 0.1253
step 200: mean loss = 0.0991
step 300: mean loss = 0.0891
step 400: mean loss = 0.0842
step 500: mean loss = 0.0808
step 600: mean loss = 0.0787
step 700: mean loss = 0.0771
step 800: mean loss = 0.0759
step 900: mean loss = 0.0749
Start of epoch 1
step 0: mean loss = 0.0746
step 100: mean loss = 0.0740
step 200: mean loss = 0.0735
step 300: mean loss = 0.0730
step 400: mean loss = 0.0727
step 500: mean loss = 0.0723
step 600: mean loss = 0.0720
step 700: mean loss = 0.0717
step 800: mean loss = 0.0715
step 900: mean loss = 0.0712
###Markdown
Note that since the VAE is subclassing `Model`, it features built-in training loops. So you could also have trained it like this:
###Code
vae = VariationalAutoEncoder(784, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=2, batch_size=64)
###Output
Epoch 1/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0748
Epoch 2/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
###Markdown
Beyond object-oriented development: the Functional API
Was this example too much object-oriented development for you? You can also build models using the [Functional API](https://www.tensorflow.org/guide/keras/functional/). Importantly, choosing one style or another does not prevent you from leveraging components written in the other style: you can always mix-and-match.

For instance, the Functional API example below reuses the same `Sampling` layer we defined in the example above:
###Code
original_dim = 784
intermediate_dim = 64
latent_dim = 32
# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name="encoder_input")
x = layers.Dense(intermediate_dim, activation="relu")(original_inputs)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name="encoder")
# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name="z_sampling")
x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
outputs = layers.Dense(original_dim, activation="sigmoid")(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name="decoder")
# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name="vae")
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)
# Train.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
###Output
Epoch 1/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0746
Epoch 2/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
Epoch 3/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
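###Markdown
Returning to the `mask` argument discussed earlier, here is the promised minimal mask-generation sketch (an illustrative addition, not from the original guide): an `Embedding` layer with `mask_zero=True` computes a boolean mask that downstream mask-consuming layers receive automatically.
###Code
# Token id 0 is treated as padding and masked out.
embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
token_ids = tf.constant([[3, 7, 0, 0], [2, 0, 0, 0]])
print(embedding.compute_mask(token_ids))  # True wherever the token id != 0
###Output
_____no_output_____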
###Markdown
Making new Layers and Models via subclassing

Learning Objectives
* Layer class: the combination of state (weights) and computation.
* Defer weight creation until the shape of the inputs is known.
* Layers are recursively composable.
* Compute loss using the `add_loss()` method.
* Compute average using the `add_metric()` method.
* Enable serialization on layers.

Introduction
This tutorial shows how to build new layers and models via [subclassing](https://towardsdatascience.com/model-sub-classing-and-custom-training-loop-from-scratch-in-tensorflow-2-cc1d4f10fb4e). __Subclassing__ is a term that refers to inheriting properties for a new object from a base or superclass object.

Each learning objective will correspond to a __TODO__ in the [student lab notebook](../labs/custom_layers_and_models.ipynb) -- try to complete that notebook first before reviewing this solution notebook.

Setup
###Code
# Import necessary libraries
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
The `Layer` class: the combination of state (weights) and some computation
One of the central abstractions in Keras is the `Layer` class. A layer encapsulates both a state (the layer's "weights") and a transformation from inputs to outputs (a "call", the layer's forward pass).

Here's a densely-connected layer. It has a state: the variables `w` and `b`.
###Code
# Define a Linear class
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_dim, units), dtype="float32"),
trainable=True,
)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
You would use a layer by calling it on some tensor input(s), much like a Python function.
###Code
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
2021-09-14 13:30:06.886241: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
###Markdown
Note that the weights `w` and `b` are automatically tracked by the layer upon being set as layer attributes:
###Code
assert linear_layer.weights == [linear_layer.w, linear_layer.b]
###Output
_____no_output_____
###Markdown
Note you also have access to a quicker shortcut for adding weight to a layer: the `add_weight()` method:
###Code
# TODO
# Use `add_weight()` method for adding weight to a layer
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
x = tf.ones((2, 2))
linear_layer = Linear(4, 2)
y = linear_layer(x)
print(y)
###Output
tf.Tensor(
[[ 0.03132978 -0.00557932 0.02176719 -0.00405929]
[ 0.03132978 -0.00557932 0.02176719 -0.00405929]], shape=(2, 4), dtype=float32)
###Markdown
Layers can have non-trainable weights
Besides trainable weights, you can add non-trainable weights to a layer as well. Such weights are meant not to be taken into account during backpropagation, when you are training the layer.

Here's how to add and use a non-trainable weight:
###Code
# Add and use a non-trainable weight
class ComputeSum(keras.layers.Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
x = tf.ones((2, 2))
my_sum = ComputeSum(2)
y = my_sum(x)
print(y.numpy())
y = my_sum(x)
print(y.numpy())
###Output
[2. 2.]
[4. 4.]
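###Markdown
One illustrative addition: because `total` is a plain `tf.Variable`, the running sum can be reset explicitly with `assign()`.
###Code
# Reset the accumulated state back to zeros.
my_sum.total.assign(tf.zeros_like(my_sum.total))
print(my_sum.total.numpy())
###Output
_____no_output_____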
###Markdown
It's part of `layer.weights`, but it gets categorized as a non-trainable weight:
###Code
print("weights:", len(my_sum.weights))
print("non-trainable weights:", len(my_sum.non_trainable_weights))
# It's not included in the trainable weights:
print("trainable_weights:", my_sum.trainable_weights)
###Output
weights: 1
non-trainable weights: 1
trainable_weights: []
###Markdown
Best practice: deferring weight creation until the shape of the inputs is known
Our `Linear` layer above took an `input_dim` argument that was used to compute the shape of the weights `w` and `b` in `__init__()`:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, input_dim=32):
super(Linear, self).__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
In many cases, you may not know in advance the size of your inputs, and you would like to lazily create weights when that value becomes known, some time after instantiating the layer.

In the Keras API, we recommend creating layer weights in the `build(self, input_shape)` method of your layer. Like this:
###Code
# TODO
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
###Output
_____no_output_____
###Markdown
The `__call__()` method of your layer will automatically run `build()` the first time it is called. You now have a layer that's lazy and thus easier to use:
###Code
# At instantiation, we don't know on what inputs this is going to get called
linear_layer = Linear(32)
# The layer's weights are created dynamically the first time the layer is called
y = linear_layer(x)
###Output
_____no_output_____
###Markdown
Layers are recursively composable
If you assign a Layer instance as an attribute of another Layer, the outer layer will start tracking the weights of the inner layer.

We recommend creating such sublayers in the `__init__()` method (since the sublayers will typically have a `build()` method, they will be built when the outer layer gets built).
###Code
# TODO
# Let's assume we are reusing the Linear class
# with a `build` method that we defined above.
class MLPBlock(keras.layers.Layer):
def __init__(self):
super(MLPBlock, self).__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(1)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.linear_2(x)
x = tf.nn.relu(x)
return self.linear_3(x)
mlp = MLPBlock()
y = mlp(tf.ones(shape=(3, 64))) # The first call to the `mlp` will create the weights
print("weights:", len(mlp.weights))
print("trainable weights:", len(mlp.trainable_weights))
###Output
weights: 6
trainable weights: 6
###Markdown
The `add_loss()` method
When writing the `call()` method of a layer, you can create loss tensors that you will want to use later, when writing your training loop. This is doable by calling `self.add_loss(value)`:
###Code
# A layer that creates an activity regularization loss
class ActivityRegularizationLayer(keras.layers.Layer):
def __init__(self, rate=1e-2):
super(ActivityRegularizationLayer, self).__init__()
self.rate = rate
def call(self, inputs):
self.add_loss(self.rate * tf.reduce_sum(inputs))
return inputs
###Output
_____no_output_____
###Markdown
These losses (including those created by any inner layer) can be retrieved via `layer.losses`. This property is reset at the start of every `__call__()` to the top-level layer, so that `layer.losses` always contains the loss values created during the last forward pass.
###Code
# TODO
class OuterLayer(keras.layers.Layer):
def __init__(self):
super(OuterLayer, self).__init__()
self.activity_reg = ActivityRegularizationLayer(1e-2)
def call(self, inputs):
return self.activity_reg(inputs)
layer = OuterLayer()
assert len(layer.losses) == 0 # No losses yet since the layer has never been called
_ = layer(tf.zeros((1, 1)))
assert len(layer.losses) == 1 # We created one loss value
# `layer.losses` gets reset at the start of each __call__
_ = layer(tf.zeros((1, 1)))
assert len(layer.losses) == 1 # This is the loss created during the call above
###Output
_____no_output_____
###Markdown
In addition, the `losses` property also contains regularization losses created for the weights of any inner layer:
###Code
class OuterLayerWithKernelRegularizer(keras.layers.Layer):
def __init__(self):
super(OuterLayerWithKernelRegularizer, self).__init__()
self.dense = keras.layers.Dense(
32, kernel_regularizer=tf.keras.regularizers.l2(1e-3)
)
def call(self, inputs):
return self.dense(inputs)
layer = OuterLayerWithKernelRegularizer()
_ = layer(tf.zeros((1, 1)))
# This is `1e-3 * sum(layer.dense.kernel ** 2)`,
# created by the `kernel_regularizer` above.
print(layer.losses)
###Output
[<tf.Tensor: shape=(), dtype=float32, numpy=0.0020471246>]
###Markdown
These losses are meant to be taken into account when writing training loops, like this:

```python
# Instantiate an optimizer.
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Iterate over the batches of a dataset.
for x_batch_train, y_batch_train in train_dataset:
    with tf.GradientTape() as tape:
        logits = layer(x_batch_train)  # Logits for this minibatch
        # Loss value for this minibatch
        loss_value = loss_fn(y_batch_train, logits)
        # Add extra losses created during this forward pass:
        loss_value += sum(model.losses)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
```

For a detailed guide about writing training loops, see the [guide to writing a training loop from scratch](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch/).

These losses also work seamlessly with `fit()` (they get automatically summed and added to the main loss, if any):
###Code
import numpy as np
inputs = keras.Input(shape=(3,))
outputs = ActivityRegularizationLayer()(inputs)
model = keras.Model(inputs, outputs)
# If there is a loss passed in `compile`, the regularization
# losses get added to it
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# It's also possible not to pass any loss in `compile`,
# since the model already has a loss to minimize, via the `add_loss`
# call during the forward pass!
model.compile(optimizer="adam")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
###Output
2021-09-14 13:30:07.699809: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)
###Markdown
The `add_metric()` method
Similarly to `add_loss()`, layers also have an `add_metric()` method for tracking the moving average of a quantity during training.

Consider the following layer: a "logistic endpoint" layer. It takes as inputs predictions & targets, it computes a loss which it tracks via `add_loss()`, and it computes an accuracy scalar, which it tracks via `add_metric()`.
###Code
# TODO
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super(LogisticEndpoint, self).__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_fn = keras.metrics.BinaryAccuracy()
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Log accuracy as a metric and add it
# to the layer using `self.add_metric()`.
acc = self.accuracy_fn(targets, logits, sample_weights)
self.add_metric(acc, name="accuracy")
# Return the inference-time prediction tensor (for `.predict()`).
return tf.nn.softmax(logits)
###Output
_____no_output_____
###Markdown
Metrics tracked in this way are accessible via `layer.metrics`:
###Code
layer = LogisticEndpoint()
targets = tf.ones((2, 2))
logits = tf.ones((2, 2))
y = layer(targets, logits)
print("layer.metrics:", layer.metrics)
print("current accuracy value:", float(layer.metrics[0].result()))
###Output
layer.metrics: [<keras.metrics.BinaryAccuracy object at 0x7eff0931ac10>]
current accuracy value: 1.0
###Markdown
Just like for `add_loss()`, these metrics are tracked by `fit()`:
###Code
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
# `LogisticEndpoint.call()` expects (targets, logits), in that order.
predictions = LogisticEndpoint(name="predictions")(targets, logits)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam")
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
###Output
1/1 [==============================] - 0s 338ms/step - loss: 1.1031 - binary_accuracy: 0.0000e+00
###Markdown
You can optionally enable serialization on your layers
If you need your custom layers to be serializable as part of a [Functional model](https://www.tensorflow.org/guide/keras/functional/), you can optionally implement a `get_config()` method:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32):
super(Linear, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
# You can enable serialization on your layers using `get_config()` method
# Now you can recreate the layer from its config:
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'units': 64}
###Markdown
Note that the `__init__()` method of the base `Layer` class takes some keyword arguments, in particular a `name` and a `dtype`. It's good practice to pass these arguments to the parent class in `__init__()` and to include them in the layer config:
###Code
class Linear(keras.layers.Layer):
def __init__(self, units=32, **kwargs):
super(Linear, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
def get_config(self):
config = super(Linear, self).get_config()
config.update({"units": self.units})
return config
layer = Linear(64)
config = layer.get_config()
print(config)
new_layer = Linear.from_config(config)
###Output
{'name': 'linear_8', 'trainable': True, 'dtype': 'float32', 'units': 64}
###Markdown
If you need more flexibility when deserializing the layer from its config, you can also override the `from_config()` class method (a sketch of such an override appears below). This is the base implementation of `from_config()`:

```python
def from_config(cls, config):
    return cls(**config)
```

To learn more about serialization and saving, see the complete [guide to saving and serializing models](https://www.tensorflow.org/guide/keras/save_and_serialize/).

Privileged `training` argument in the `call()` method
Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call()` method.

By exposing this argument in `call()`, you enable the built-in training and evaluation loops (e.g. `fit()`) to correctly use the layer in training and inference.
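As a hedged sketch of the `from_config()` override mentioned above (the class name and the config handling here are illustrative assumptions, not code from this guide), you might transform or pop config entries before calling the constructor:

```python
class LinearWithFlexibleConfig(keras.layers.Layer):
    def __init__(self, units=32, **kwargs):
        super(LinearWithFlexibleConfig, self).__init__(**kwargs)
        self.units = units

    def call(self, inputs):
        return inputs  # trivial body; this sketch focuses on (de)serialization

    def get_config(self):
        config = super(LinearWithFlexibleConfig, self).get_config()
        config.update({"units": self.units})
        return config

    @classmethod
    def from_config(cls, config):
        # Transform or rename entries here before instantiation, e.g. if
        # `units` had been stored under a legacy key in older saved configs.
        units = config.pop("units")
        return cls(units=units, **config)
```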
###Code
class CustomDropout(keras.layers.Layer):
def __init__(self, rate, **kwargs):
super(CustomDropout, self).__init__(**kwargs)
self.rate = rate
def call(self, inputs, training=None):
if training:
return tf.nn.dropout(inputs, rate=self.rate)
return inputs
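# A quick usage sketch (illustrative names of my own, assuming TF 2.x eager
# execution as elsewhere in this guide): dropout only fires when training=True.
demo_layer = CustomDropout(rate=0.5)
demo_inputs = tf.ones((2, 4))
inference_out = demo_layer(demo_inputs, training=False)  # returns inputs unchanged
training_out = demo_layer(demo_inputs, training=True)  # randomly zeroes and rescales entries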
###Output
_____no_output_____
###Markdown
Privileged `mask` argument in the `call()` method
The other privileged argument supported by `call()` is the `mask` argument. You will find it in all Keras RNN layers. A mask is a boolean tensor (one boolean value per timestep in the input) used to skip certain input timesteps when processing timeseries data.

Keras will automatically pass the correct `mask` argument to `__call__()` for layers that support it, when a mask is generated by a prior layer. Mask-generating layers are the `Embedding` layer configured with `mask_zero=True`, and the `Masking` layer.

To learn more about masking and how to write masking-enabled layers, please check out the guide ["understanding padding and masking"](https://www.tensorflow.org/guide/keras/masking_and_padding/).

The `Model` class
In general, you will use the `Layer` class to define inner computation blocks, and will use the `Model` class to define the outer model -- the object you will train.

For instance, in a ResNet50 model, you would have several ResNet blocks subclassing `Layer`, and a single `Model` encompassing the entire ResNet50 network.

The `Model` class has the same API as `Layer`, with the following differences:

- It exposes built-in training, evaluation, and prediction loops (`model.fit()`, `model.evaluate()`, `model.predict()`).
- It exposes the list of its inner layers, via the `model.layers` property.
- It exposes saving and serialization APIs (`save()`, `save_weights()`...)

Effectively, the `Layer` class corresponds to what we refer to in the literature as a "layer" (as in "convolution layer" or "recurrent layer") or as a "block" (as in "ResNet block" or "Inception block").

Meanwhile, the `Model` class corresponds to what is referred to in the literature as a "model" (as in "deep learning model") or as a "network" (as in "deep neural network").

So if you're wondering, "should I use the `Layer` class or the `Model` class?", ask yourself: will I need to call `fit()` on it? Will I need to call `save()` on it? If so, go with `Model`. If not (either because your class is just a block in a bigger system, or because you are writing training & saving code yourself), use `Layer`.

For instance, we could take our mini-resnet example above, and use it to build a `Model` that we could train with `fit()`, and that we could save with `save_weights()`:

```python
class ResNet(tf.keras.Model):

    def __init__(self, num_classes=1000):
        super(ResNet, self).__init__()
        self.block_1 = ResNetBlock()
        self.block_2 = ResNetBlock()
        self.global_pool = layers.GlobalAveragePooling2D()
        self.classifier = Dense(num_classes)

    def call(self, inputs):
        x = self.block_1(inputs)
        x = self.block_2(x)
        x = self.global_pool(x)
        return self.classifier(x)


resnet = ResNet()
dataset = ...
resnet.fit(dataset, epochs=10)
resnet.save(filepath)
```

Putting it all together: an end-to-end example
Here's what you've learned so far:

- A `Layer` encapsulates a state (created in `__init__()` or `build()`) and some computation (defined in `call()`).
- Layers can be recursively nested to create new, bigger computation blocks.
- Layers can create and track losses (typically regularization losses) as well as metrics, via `add_loss()` and `add_metric()`.
- The outer container, the thing you want to train, is a `Model`. A `Model` is just like a `Layer`, but with added training and serialization utilities.

Let's put all of these things together into an end-to-end example: we're going to implement a Variational AutoEncoder (VAE). We'll train it on MNIST digits.

Our VAE will be a subclass of `Model`, built as a nested composition of layers that subclass `Layer`.
It will feature a regularization loss (KL divergence).
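Returning to the `mask` argument described above, here is a minimal hedged sketch (standard Keras layers; the token ids are invented for illustration) of how an `Embedding` layer with `mask_zero=True` produces a mask for downstream layers:

```python
embedding = keras.layers.Embedding(input_dim=100, output_dim=8, mask_zero=True)
token_ids = tf.constant([[3, 7, 0, 0]])  # zeros mark padding timesteps
embedded = embedding(token_ids)
mask = embedding.compute_mask(token_ids)  # [[True, True, False, False]]
```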
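For reference (standard VAE math, added here as a clarifying note rather than taken from this guide), the `kl_loss` expression in `call()` below is the closed-form KL divergence between the approximate posterior $\mathcal{N}(\mu, \sigma^2)$, with $\mu$ = `z_mean` and $\log \sigma^2$ = `z_log_var`, and the standard normal prior:

$$\mathrm{KL}\left(\mathcal{N}(\mu, \sigma^2)\,\middle\|\,\mathcal{N}(0, 1)\right) = -\frac{1}{2}\left(1 + \log \sigma^2 - \mu^2 - \sigma^2\right),$$

averaged over the batch and latent dimensions.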
###Code
from tensorflow.keras import layers
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
"""Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
def __init__(self, latent_dim=32, intermediate_dim=64, name="encoder", **kwargs):
super(Encoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_mean = layers.Dense(latent_dim)
self.dense_log_var = layers.Dense(latent_dim)
self.sampling = Sampling()
def call(self, inputs):
x = self.dense_proj(inputs)
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z
class Decoder(layers.Layer):
"""Converts z, the encoded digit vector, back into a readable digit."""
def __init__(self, original_dim, intermediate_dim=64, name="decoder", **kwargs):
super(Decoder, self).__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_output = layers.Dense(original_dim, activation="sigmoid")
def call(self, inputs):
x = self.dense_proj(inputs)
return self.dense_output(x)
class VariationalAutoEncoder(keras.Model):
"""Combines the encoder and decoder into an end-to-end model for training."""
def __init__(
self,
original_dim,
intermediate_dim=64,
latent_dim=32,
name="autoencoder",
**kwargs
):
super(VariationalAutoEncoder, self).__init__(name=name, **kwargs)
self.original_dim = original_dim
self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim)
self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)
def call(self, inputs):
z_mean, z_log_var, z = self.encoder(inputs)
reconstructed = self.decoder(z)
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(
z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1
)
self.add_loss(kl_loss)
return reconstructed
###Output
_____no_output_____
###Markdown
Let's write a simple training loop on MNIST:
###Code
original_dim = 784
vae = VariationalAutoEncoder(original_dim, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
train_dataset = tf.data.Dataset.from_tensor_slices(x_train)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
epochs = 2
# Iterate over epochs.
for epoch in range(epochs):
print("Start of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, x_batch_train in enumerate(train_dataset):
with tf.GradientTape() as tape:
reconstructed = vae(x_batch_train)
# Compute reconstruction loss
loss = mse_loss_fn(x_batch_train, reconstructed)
loss += sum(vae.losses) # Add KLD regularization loss
grads = tape.gradient(loss, vae.trainable_weights)
optimizer.apply_gradients(zip(grads, vae.trainable_weights))
loss_metric(loss)
if step % 100 == 0:
print("step %d: mean loss = %.4f" % (step, loss_metric.result()))
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
11501568/11490434 [==============================] - 0s 0us/step
Start of epoch 0
step 0: mean loss = 0.3305
step 100: mean loss = 0.1253
step 200: mean loss = 0.0991
step 300: mean loss = 0.0891
step 400: mean loss = 0.0842
step 500: mean loss = 0.0808
step 600: mean loss = 0.0787
step 700: mean loss = 0.0771
step 800: mean loss = 0.0759
step 900: mean loss = 0.0749
Start of epoch 1
step 0: mean loss = 0.0746
step 100: mean loss = 0.0740
step 200: mean loss = 0.0735
step 300: mean loss = 0.0730
step 400: mean loss = 0.0727
step 500: mean loss = 0.0723
step 600: mean loss = 0.0720
step 700: mean loss = 0.0717
step 800: mean loss = 0.0715
step 900: mean loss = 0.0712
###Markdown
Note that since the VAE is subclassing `Model`, it features built-in training loops. So you could also have trained it like this:
###Code
vae = VariationalAutoEncoder(784, 64, 32)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=2, batch_size=64)
###Output
Epoch 1/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0748
Epoch 2/2
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
###Markdown
Beyond object-oriented development: the Functional API
Was this example too much object-oriented development for you? You can also build models using the [Functional API](https://www.tensorflow.org/guide/keras/functional/). Importantly, choosing one style or another does not prevent you from leveraging components written in the other style: you can always mix-and-match.

For instance, the Functional API example below reuses the same `Sampling` layer we defined in the example above:
###Code
original_dim = 784
intermediate_dim = 64
latent_dim = 32
# Define encoder model.
original_inputs = tf.keras.Input(shape=(original_dim,), name="encoder_input")
x = layers.Dense(intermediate_dim, activation="relu")(original_inputs)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()((z_mean, z_log_var))
encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name="encoder")
# Define decoder model.
latent_inputs = tf.keras.Input(shape=(latent_dim,), name="z_sampling")
x = layers.Dense(intermediate_dim, activation="relu")(latent_inputs)
outputs = layers.Dense(original_dim, activation="sigmoid")(x)
decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name="decoder")
# Define VAE model.
outputs = decoder(z)
vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name="vae")
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
vae.add_loss(kl_loss)
# Train.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError())
vae.fit(x_train, x_train, epochs=3, batch_size=64)
###Output
Epoch 1/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0746
Epoch 2/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
Epoch 3/3
938/938 [==============================] - 4s 4ms/step - loss: 0.0676
|
chapter_3_exercises.ipynb | ###Markdown
**Exercise 1)**
###Code
s = 'colorless'
s = s[:4] + 'u' + s[4:]
s
###Output
_____no_output_____
###Markdown
**Exercise 2)**
###Code
'dishes'[:-2]
'running'[:-4]
'nationality'[:-5]
'undo'[2:]
'preheat'[3:]
###Output
_____no_output_____
###Markdown
**Exercise 3)**
###Code
'in'[-5]  # raises an IndexError: 'in' has only two characters, so index -5 is out of range
###Output
_____no_output_____
###Markdown
**Exercise 4)**
###Code
monty = 'Monty Python'
monty[6:11:2]  # 'Pto' -- every second character from index 6 up to (but excluding) 11
monty[10:5:-2]  # 'otP' -- every second character from index 10 down to (but excluding) 5
monty[1:10:-2]  # '' -- empty: a negative step cannot move forward from 1 to 10
monty[1:6:1.5]  # raises a TypeError: slice steps must be integers
###Output
_____no_output_____
###Markdown
**Exercise 5)**
###Code
monty[::-1]
###Output
_____no_output_____
###Markdown
**Exercise 6)**
###Code
from __future__ import division
import nltk, re, pprint
# a - one or more letters
nltk.re_show(r'[a-zA-Z]+', monty)
# b - one capital letter and zero or more lowercase letters
nltk.re_show(r'[A-Z][a-z]*', monty)
nltk.re_show(r'[A-Z][a-z]*', 'A very Intersting3 example')
# c - a word starting with p, followed by 0 up to 2 vowels and ending with t
nltk.re_show(r'p[aeiou]{,2}t', 'two pouting party pets - pt')
# d - integer or decimal number
nltk.re_show(r'\d+(\.\d+)?', 'This should match 23 as well as 1.093 and 999.9')
# e - zero or more sequences of not-a-vowel - vowel - not-a-vowel
nltk.re_show(r'([^aeiou][aeiou][^aeiou])*', 'This should match pet as well as cut and lol')
# f - one or more alphanumeric characters, or one or more characters that are neither alphanumeric nor whitespace
nltk.re_show(r'\w+|[^\w\s]+', 'should match me but not \n')
###Output
{should} {match} {me} {but} {not}
###Markdown
**Exercise 7)**
###Code
a = r'^(the|a|an)$'
nltk.re_show(a, 'the something')
nltk.re_show(a, 'the')
nltk.re_show(a, 'an')
nltk.re_show(a, 'anything')
b = r'\d+([\+\*]\d+)+'
nltk.re_show(b, 'something+2')
nltk.re_show(b, '2*3+8')
nltk.re_show(b, '200+5000')
nltk.re_show(b, '2*3+8-5/6')
###Output
something+2
{2*3+8}
{200+5000}
{2*3+8}-5/6
###Markdown
**Exercise 8)**
###Code
from bs4 import BeautifulSoup
import urllib
def getContentFromURL(url):
raw = urllib.urlopen(url).read()
soup = BeautifulSoup(raw)
return soup.get_text()
getContentFromURL('http://www.nltk.org/')
###Output
c:\python27\lib\site-packages\bs4\__init__.py:181: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system ("html.parser"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.
The code that caused this warning is on line 174 of the file c:\python27\lib\runpy.py. To get rid of this warning, change code that looks like this:
BeautifulSoup([your markup])
to this:
BeautifulSoup([your markup], "html.parser")
markup_type=markup_type))
###Markdown
**Exercise 9)**
###Code
def load(fileName):
f = open(fileName + '.txt')
return f.read()
corpusText = load('corpus')
# a
pattern = r'''(?x)
[\.,;"'?\(\):\-_`\[\]\{\}]+ # one or more punctuation symbols, brackets etc.
'''
print nltk.regexp_tokenize(corpusText, pattern)
# b
pattern = r'''(?x)
(?:\d+\.)?\d+\s?\$ # Monetary amount like 2.40$
| \$\s?(?:\d+\.)?\d+ # Monetary amount like $2.40
    | \d{4}\-\d{2}\-\d{2} # Date like 2016-11-01
| \d{1,2}\s[A-Z][a-z]{2,8}\s\d{4} # Date like 2 March 1998
| [A-Z][a-z]+(?:\s[A-Z][a-z]+)? # Proper Names - TODO: don't match beginning of sentence
'''
testString = 'should match 3.50$ or 8 $ or 9$ or $2.40 or 2016-11-01 or 2 March 1998 or 19 January 2001 or Sam or United Nations'
print nltk.regexp_tokenize(testString, pattern)
print nltk.regexp_tokenize(corpusText, pattern)
###Output
['Web', 'Based', 'Assessment Beyond', 'Multiple', 'Choice', 'The Application', 'Technologies', 'Different Testing', 'Formats', 'Documentation', 'Master', 'Thesis', 'Linguistics', 'Web Technology', 'Faculty', 'Foreign Languages', 'Cultures', 'Philipps', 'Universit', 'Marburg', 'Julia Neumann', 'Naumburg', 'Germany', 'Marburg', 'Contents', 'List', 'Abbreviations', 'Introduction', 'User Guide', 'Overall Organization', 'Code', 'General Design', 'Java', 'Script Components', 'Implementation', 'Testing Formats', 'Crossword', 'Dynamic Multiple', 'Choice', 'Drag', 'Drop', 'Database Structure', 'General Features', 'Index Page', 'Contact Page', 'Color Changer', 'Inline Editing', 'Deletion', 'Exporting Tests', 'References', 'Appendix', 'Database Structure', 'Declaration', 'Authorship', 'List', 'Abbreviations', 'Asynchronous Java', 'Script', 'Cascading Style', 'Sheets', 'Document Object', 'Model', 'Hypertext Markup', 'Language', 'Joint Photographic', 'Experts Group', 'Model', 'View', 'Controller\nMy', 'Li', 'My', 'Improved', 'Hypertext Preprocessor', 'Portable Network', 'Graphics', 'Structured Query', 'Language', 'Scalable Vector', 'Graphics', 'Extensible Markup', 'Language', 'Introduction\nThis', 'The', 'Thus', 'These', 'Users', 'Modern', 'While', 'Java', 'Script', 'The', 'Moreover', 'Before', 'User Guide', 'The', 'This', 'Hovering', 'Apart', 'They', 'The', 'By', 'Clicking', 'To', 'Therefore', 'Instead', 'Home', 'Clicking', 'The', 'Short', 'In', 'Elements', 'This', 'Pressing', 'Enter', 'Elements', 'Clicking', 'When', 'Furthermore', 'Running', 'The', 'Check', 'While', 'Overall Organization', 'Code\nThe', 'Java', 'Script', 'The', 'Furthermore', 'On', 'Its', 'My', 'Li', 'Objects', 'Furthermore', 'Lastly', 'It', 'Another', 'These', 'The', 'Java', 'Script', 'Java', 'Script', 'Aside', 'For', 'These', 'Java', 'Script', 'The', 'The', 'Java', 'Script', 'These', 'Furthermore', 'This', 'Java', 'Script', 'Finally', 'Apart', 'This', 'Java', 'Script', 'It', 'Java', 'Script', 'As', 'Java', 'Script', 'Java', 'Script', 'Therefore', 'Java', 'Script', 'General Design', 'Java', 'Script Components', 'As', 'Java', 'Script', 'Model', 'View', 'Controller', 'The', 'Combining', 'Java', 'Script', 'The', 'Java', 'Script', 'Test', 'Test', 'Item', 'View', 'Control', 'These', 'View', 'In', 'Java', 'Script', 'Test', 'View', 'Control', 'The', 'Test', 'Item', 'Question', 'Item', 'Container', 'These', 'While', 'View', 'When', 'The', 'Whenever', 'Implementation', 'Testing Formats', 'While', 'Java', 'Script', 'Crossword\nFor', 'Answers', 'English', 'Once', 'The', 'Java', 'Script', 'Thus', 'The', 'Given', 'Then', 'For', 'If', 'Once', 'This', 'If', 'The', 'Thus', 'First', 'This', 'Second', 'Should', 'The', 'As', 'Web Worker', 'This', 'Before', 'Web Workers', 'In', 'Once', 'Java', 'Script', 'When', 'Apart', 'Simply', 'An', 'Upon', 'Dynamic Multiple', 'Choice\nWhile', 'As', 'Thus', 'This', 'As', 'By', 'It', 'For', 'In', 'As', 'For', 'Here', 'Simple', 'Apart', 'As', 'Java', 'Script', 'This', 'Lastly', 'Thus', 'When', 'This', 'Drag', 'Drop\nThe', 'While', 'The', 'The', 'Thus', 'Once', 'Elements', 'Dragenter', 'When', 'The', 'Finally', 'The', 'In', 'This', 'Another', 'Learners', 'In', 'Java', 'Script', 'Database Structure', 'Complementing', 'My', 'For', 'Appendix', 'The', 'On', 'The', 'This', 'The', 'Ds', 'Similarly', 'Ds', 'This', 'On', 'The', 'Each', 'Item', 'The', 'As', 'The', 'Thus', 'Lastly', 'The', 'This', 'Strictly', 'Storing', 'It', 'Therefore', 'General Features', 'Apart', 'The', 'Index Page', 'The', 'This', 'The', 'More', 'Assuming', 
'The', 'The', 'As', 'Therefore', 'First', 'Second', 'Java', 'Script', 'This', 'The', 'Contact Page', 'There', 'The', 'Apart', 'Internet', 'As', 'Color Changer', 'In', 'This', 'This', 'Changing', 'Java', 'Script', 'In', 'As', 'This', 'Local Storage', 'Each', 'Whenever', 'If', 'This', 'Inline Editing', 'Deletion\nAnother', 'While', 'Displaying', 'Therefore', 'For', 'The', 'View', 'Thus', 'When', 'Also', 'Following', 'These', 'As', 'Exporting Tests', 'Administrators', 'As', 'The', 'Basically', 'When', 'Download', 'Download', 'Print Test', 'Object', 'This', 'Next', 'Blob', 'This Blob', 'As', 'Object', 'Blob', 'The', 'If', 'Once', 'If', 'Data', 'This', 'In', 'The', 'It', 'First', 'Internet Explorer', 'Object', 'The', 'Second', 'Data', 'Blob', 'Chrome', 'Opera', 'Thus', 'Firefox', 'Printing', 'Chrome', 'Opera', 'Firefox', 'Again', 'As', 'This', 'Local Storage', 'Web Workers', 'Web', 'References\nInternet', 'Sources', '06 May 2015', '06 May 2015', '06 May 2015', '11 May 2015', '07 May 2015', '07 May 2015', '11 May 2015', 'Web', 'Element', 'Object', '12 May 2015', '11 May 2015', 'Further Internet', 'Sources', '07 May 2015', '07 May 2015', '07 May 2015', '07 May 2015', 'Appendix', 'Database Structure', 'Declaration', 'Authorship', 'Master', 'Thesis', 'Master', 'Arts', 'Web', 'Based', 'Assessment Beyond', 'Multiple', 'Choice', 'The Application', 'Technologies', 'Different Testing', 'Formats', 'No', 'All', 'The', 'Internet', 'Marburg', '19 May 2015', 'Markup Languages', 'Human Language', 'Technologies', 'Three Examples', 'Julia Neumann', 'Paper', 'Human Language', 'Technologies', 'Winter Term', 'Submission Date', '17 December 2013', 'Approved', 'Dr', 'Peter Franke', 'Philipps University', 'Marburg\nContents', 'List', 'Abbreviations', 'Introduction', 'Overview', 'Markup Language', 'Advantages', 'Applications', 'Languages', 'Conclusions', 'References', 'Appendix', 'Example', 'Example', 'Example', 'Example', 'List', 'Abbreviations', 'Artificial Intelligence', 'Markup Language', 'Document Type', 'Definition', 'Human Language', 'Technologies', 'Hypertext Markup', 'Language', 'Natural Language', 'Processing', 'Web Ontology', 'Language', 'Standard Generalized', 'Markup Language', 'World Wide', 'Web Consortium', 'Extensible Markup', 'Language', 'Path', 'Path Language', 'Extensible Stylesheet', 'Language Transformations', 'Introduction', 'Overview', 'The', 'In', 'Furthermore', 'But', 'Extensible Markup', 'Language', 'The', 'Lobin', 'Its', 'Markup Language', 'In', 'Markup', 'Lobin', 'However', 'This', 'Lobin', 'More', 'Advantages', 'Applications', 'The', 'Firstly', 'Secondly', 'Moreover', 'These', 'As', 'Schwartzbach', 'Languages', 'Considering', 'Artificial Intelligence', 'Markup Language', 'Web Ontology', 'Language', 'Extensible Stylesheet', 'Transformations', 'Our', 'It', 'The', 'Each', 'These', 'For', 'Furthermore', 'Other', 'These', 'Contemporary', 'Fialho', 'Silvervarg', 'As Bii', 'While', 'Silvervarg', 'It', 'Semantic Web', 'Web', 'Such', 'In', 'The', 'The', 'Class', 'By', 'To', 'Web', 'In', 'Androutsopoulos', 'Another', 'Sateli', 'Semantic Assistants', 'Witte', 'Gitzinger', 'Here', 'After', 'Being', 'Lobin', 'These', 'In', 'More', 'Lobin', 'The', 'This', 'Path', 'For', 'Gill', 'Similarly', 'Of', 'Sch', 'Weitz', 'Conclusions\nThe', 'Of', 'Using', 'As', 'Still', 'Other', 'Other', 'Furthermore', 'However', 'Moreover', 'Path', 'Still', 'Lobin', 'Although', 'Due', 'References\nAndroutsopoulos', 'Ion', 'Lampouras', 'Gerasimos', 'Galanis', 'Dimitros', 'Generating Natural', 'Language Descriptions', 
'Ontologies', 'Natural', 'System', 'In', 'Journal', 'Artificial Intelligence', 'Research', 'November', 'Available Online', 'Last', '30 November 2013', 'Bii', 'Patrick', 'Chatbot Technology', 'Possible Means', 'Unlocking Student', 'Potential', 'Learn How', 'Learn', 'In', 'Educational Research', 'Februar', 'Available Online', 'Last', '30 November 2013', 'Fialho', 'Pedro', 'Coheur', 'Lu', 'Curto', 'Costa', 'Pedro', 'Abad', 'Alberto', 'Meinedo', 'Hugo', 'Trancoso', 'Isabel', 'Meet', 'In', 'Proceedings', 'Annual Meeting', 'Association', 'Computational Linguistics', 'Sofia', 'Bulgaria', '9 August 2013', 'Available Online', 'Last', '28 November 2013', 'Gill', 'Alastair', 'Brockmann', 'Carsten', 'Oberlander', 'Jon', 'Perceptions', 'Alignment', 'Personality', 'Generated Dialogue', 'In', 'Proceedings', 'International Natural', 'Language Generation', 'Conference', 'May', '1 June 2012', 'Utica', 'Available Online', 'Last', '29 November 2013', 'Lobin', 'Henning', 'Computerlinguistik', 'Texttechnologie', 'Paderborn', 'Fink', 'Anders', 'Schwartzbach', 'Michael', 'An Introduction', 'Web Technologies', 'Harlow', 'Addison', 'Wesley', 'Sateli', 'Bahar', 'Cook', 'Gina', 'Witte', 'Ren', 'Smarter Mobile', 'Apps', 'Integrated Natural', 'Language Processing', 'Services', 'In', 'Proceedings', 'International Conference', 'Mobile Web', 'Information Systems', 'Paphos', 'Cyprus', '28 August 2013', 'Available Online', 'Last', '30 November 2013', 'Sch', 'Ulrich', 'Weitz', 'Benjamin', 'Combining', 'Outputs', 'Logical Document', 'Structure Markup', 'Technical Background', 'Contributed Task', 'In', 'Proceedings', 'Special Workshop', 'Rediscovering', 'Years', 'Discoveries', 'Jeju', 'Republic', 'Korea', '10 July 2012', 'Available Online', 'Last', '30 November 2013', 'Silvervarg', 'Annika', 'Arne', 'Iterative Development', 'Evaluation', 'Social Conversational', 'Agent', 'In', 'Proceedings', 'International Joint', 'Conference', 'Natural Language', 'Processing', 'Nagoya', 'Japan', '18 October 2013', 'Available Online', 'Last', '28 November 2013', 'Witte', 'Ren', 'Gitzinger', 'Thomes', 'Semantic Assistants', 'User', 'Centric Natural', 'Language Processing', 'Services', 'Desktop Clients', 'In', 'Proceedings', 'Asian Semantic', 'Web Conference', 'Bangkok', 'Thailand', '11 December 2008', 'Available', 'Online', 'Last', '30 November 2013', 'Internet Sources', '21 November 2013', '23 November 2013', '28 November 2013', '28 November 2013', '28 November 2013', '30 November 2013', '30 November 2013', 'Web', 'Ontology', 'Language', '30 November 2013', '29 November 2013', '29 November 2013', '30 November 2013', 'Procedural', '6 December 2013', '7 December 2013', 'Appendix\nThis', 'It', 'Example\nThe', 'Lobin', 'In', 'Within', 'Lobin', 'The', 'Lobin', 'Example', 'This', 'When', 'This', 'Do', 'Star Wars', 'No', 'The', 'Which', 'Why', 'Star Wars', 'The', 'The', 'It', 'Example\nThe', 'Class', 'Class', 'Of', 'Class', 'Thing', 'There', 'Thing', 'Thing', 'The', 'Object', 'Property', 'Color', 'Object', 'Property', 'Here', 'It', 'Color', 'Thing', 'Color', 'Thing', 'The', 'Example\nIn', 'Lobin', 'The', 'Hi', 'How', 'In', 'User', 'Chatbot', 'Hi', 'How', 'For', 'This', 'User', 'Parts', 'The', 'Path', 'Chatbot', 'The', 'An', 'Uber', 'Exceptional', 'Perspective', 'Borrowing', 'Corpus', 'Based Case', 'Study', 'German Loan', 'Morpheme', 'Present', 'Day English', 'Julia Neumann', 'Paper', 'The New', 'Media', 'Linguistics', 'Corpus Linguistics', 'Summer Term', 'Submission Date', '19 September 2014', 'Approved', 'Prof', 'Dr', 'Rolf Kreyer', 'Philipps 
University', 'Marburg\nContents', 'Abstract', 'Introduction', 'Theoretical', 'Empirical Background', 'General Aspects', 'Terminology', 'Adaptation', 'Loans', 'German Loans', 'English', 'Corpus Study', 'Example', 'German Loan', 'Morpheme', 'Choice', 'Corpus', 'Corpus Research', 'Orthography', 'Grammar', 'Semantics', 'Usage', 'Comparison', 'Donor Language', 'Conclusions', 'References', 'Appendix', 'Speaker Intuitions', 'Appendix', 'English Dictionary', 'Information', 'Appendix', 'Research Results', 'Table', 'Different Word', 'Classes', 'English', 'Table', 'Words Used', 'Most Commonly', 'English', 'Table', 'Different Word', 'Classes', 'German', 'List', 'Items Used', 'Most Commonly', 'German', 'Appendix', 'German Dictionary', 'Information', 'Confirmation', 'Authorship', 'Abstract\nLoan', 'The', 'For', 'German', 'English', 'Ten', 'Ten', 'The', 'German', 'Introduction\nOne', 'Haugen', 'While', 'English', 'German', 'German', 'English', 'Stanforth', 'However', 'German', 'German', 'English', 'Limbach', 'German', 'One', 'The', 'English', 'American', 'British English', 'English', 'Limbach', 'Thus', 'The', 'American', 'Recently', 'Relieved', 'American English', 'Limbach', 'One', 'In', 'As Stanforth', 'English', 'German', 'His', 'German', 'English', 'Stanforth', 'Accordingly', 'German', 'Pfeffer', 'Cannon', 'German', 'English', 'It', 'German', 'English', 'Such', 'Meier', 'Stubbs', 'German', 'Angst', 'English', 'German', 'Assuming', 'English', 'Corpus', 'German', 'However', 'Theoretical', 'Empirical Background', 'The', 'General Aspects', 'Terminology\nWhile', 'Haspelmath', 'Haugen', 'In', 'Concise Oxford', 'Dictionary', 'Linguistics', 'Although', 'Haugen', 'Furthermore', 'Haspelmath', 'Haugen', 'While', 'All', 'Haugen', 'Haspelmath', 'Why', 'There', 'Haspelmath', 'Thus', 'Meier', 'In', 'Stanforth', 'Adaptation', 'Loans\nNo', 'Haspelmath', 'Several', 'Pfeffer', 'Cannon', 'Haugen', 'However', 'First', 'For German', 'English', 'Meier', 'Pfeffer', 'Cannon', 'English', 'With', 'Haugen', 'It', 'German', 'English', 'English', 'German', 'Stanforth', 'As', 'With', 'Stubbs', 'German', 'English', 'German', 'One', 'Stanforth', 'In', 'Pfeffer', 'Cannon', 'German Loans', 'English\nWhile', 'German', 'English', 'German', 'English', 'It', 'Stubbs', 'Stanforth', 'English', 'Stubbs', 'Possible Reasons', 'English', 'Great Britain', 'United States', 'Stanforth', 'Thus', 'English', 'German', 'Stubbs', 'Pfeffer', 'Cannon', 'The', 'German', 'Stubbs', 'There', 'English', 'The', 'The', 'German', 'German', 'Stubbs', 'Stanforth', 'Estimates', 'German', 'English', 'Stanforth', 'Stubbs', 'In', 'German', 'English', 'Stanforth', 'English', 'Pfeffer', 'Cannon', 'Corpus Study', 'Based', 'German', 'Example', 'German Loan', 'Morpheme\nAs', 'German', 'English', 'German', 'This', 'First', 'Limbach', 'English', 'The', 'Stanforth', 'English', 'German', 'British National', 'Corpus', 'Second', 'Limbach', 'Furthermore', 'The', 'English', 'Appendix', 'The', 'The', 'English', 'Apart', 'Stubbs', 'Meier', 'For', 'English', 'All', 'Appendix', 'With', 'Additionally', 'Longman Dictionary', 'Contemporary English', 'Merriam', 'Webster Dictionary', 'Further', 'While', 'Oxford English', 'Dictionary', 'German', 'Limbach', 'German', 'Dictionary', 'Meier', 'An', 'Choice', 'Corpus\nThere', 'First', 'Onysko', 'Winter', 'Froemel', 'This', 'Second', 'English', 'Third', 'Stubbs', 'As', 'Ten', 'Ten', 'Web', 'This', 'Web', 'Jakub', 'It', 'Web', 'Furthermore', 'Sketch Engine', 'Ten', 'Ten', 'German', 'German', 'The', 'Ten', 'Ten', 'German', 'Another', 'Ten', 'Ten', 
'Jakub', 'Corpus Research', 'For', 'Sketch Engine', 'Corpus Query', 'Language', 'As', 'Ten', 'Ten', 'German', 'The', 'English', 'Orthography', 'To', 'Grammar', 'Which', 'Is', 'Semantics', 'What', 'Which', 'What', 'Comparison', 'Which', 'German', 'The', 'Orthography\nThe', 'German', 'German', 'German', 'These', 'German', 'English', 'Nothing', 'English', 'German', 'Grammar\nAs', 'It', 'Stanforth', 'Thus', 'It', 'In', 'Consequently', 'Apart', 'Is', 'Table', 'Appendix', 'Taking', 'Thus', 'These', 'Also', 'Jennifer', 'There', 'Australia', 'What', 'Even', 'English', 'English', 'Second', 'This', 'Indeed', 'In', 'Rather', 'Semantics', 'Usage\nAs', 'English', 'As', 'Sketch Engine', 'Python', 'Table', 'Appendix', 'Even', 'This', 'Limbach', 'As', 'This', 'Rafael', 'Christmas', 'Perennial', 'Duke', 'Mike Kzryzewksi', 'He', 'So', 'Or', 'The', 'Thus', 'No', 'Rumors', 'Rowling', 'Harry Potter', 'Limbach', 'Additionally', 'As', 'In Stanforth', 'German', 'English', 'Nazi Germany', 'Thus', 'German', 'Germany', 'Nazi', 'Hitler', 'German', 'Only', 'This', 'English', 'Comparison', 'Donor Language', 'To', 'German', 'In', 'Ten', 'Ten', 'Table', 'Appendix', 'The', 'German', 'These', 'Only', 'Accordingly', 'Appendix', 'Python', 'As', 'German', 'English', 'The', 'Duden Online', 'Appendix', 'Duden', 'However', 'English', 'Rather', 'In', 'German', 'English', 'German', 'Conclusions\nThe', 'English', 'Its', 'German', 'Furthermore', 'German', 'These', 'English', 'Of', 'Diachronic', 'German', 'Investigating', 'Furthermore', 'In', 'It', 'English', 'References\nHaspelmath', 'Martin', 'Lexical', 'Concepts', 'In Haspelmath', 'Martin', 'Tadmor', 'Uri', 'Loanwords', 'World', 'Languages', 'Comparative Handbook', 'Berlin', 'Walter', 'Gruyter', 'Haugen', 'Einar', 'The Analysis', 'Linguistic Borrowing', 'In', 'Language', 'April', 'June', 'Available Online', 'Last', '5 September 2014', 'Jakub', 'Milo', 'Kilgarriff', 'Adam', 'Kov', 'Vojt', 'Rychl', 'Pavel', 'Suchomel', 'The Ten', 'Ten Corpus', 'Family', 'In', 'Proceedings', 'International Corpus', 'Linguistics Conference', '26 July 2013', 'Lancaster', 'Available Online', 'Corpora', 'Ten', 'Ten', 'Last', '10 September 2014', 'Limbach', 'Jutta', 'Ausgewanderte', 'Eine Auswahl', 'Beitr', 'Ausschreibung', 'Ausgewanderte', 'Ismaning', 'Hueber', 'Meier', 'The Status', 'Foreign Words', 'English', 'The Case', 'Eight German', 'Words', 'In', 'American Speech', 'Summer', 'Available Online', 'Last', '5 September 2014', 'Onysko', 'Alexander', 'Winter', 'Froemel', 'Esme', 'Necessary', 'Exploring', 'In', 'Journal', 'Pragmatics', 'Available Online', 'Last', '5 September 2014', 'Pfeffer', 'Alan', 'Cannon', 'Garland', 'German Loanwords', 'English', 'An Historical', 'Dictionary', 'Cambridge', 'New York', 'Cambridge University', 'Press', 'Stanforth', 'Anthony', 'Functional', 'Stylistic Aspects', 'German Loans', 'English', 'In Flood', 'John', 'Salmon', 'Paul', 'Sayce', 'Oliver', 'Wells', 'Christopher', 'Das', 'Band', 'Sprache', 'Studies', 'German Language', 'Linguistic History', 'Memory', 'Leslie Seiffert', 'Stuttgart', 'Hans', 'Dieter Heinz', 'Akademischer Verlag', 'Stanforth', 'Anthony', 'Deutsche Einfl', 'Wortschatz', 'Geschichte', 'Gegenwart', 'Beitrag', 'Amerikanischen Englisch', 'Eichhoff', 'Niemeyer', 'Stubbs', 'Michael', 'Words', 'Phrases', 'Corpus Studies', 'Lexical Semantics', 'Oxford', 'Blackwell', 'Internet Sources', '9 September 2014', '10 September 2014', '10 September 2014', '8 September 2014', '8 September 2014', '8 September 2014', '8 September 2014', '10 September 2014', 'Corpora', 
'Ten', 'Ten', '10 September 2014', 'Sk', 'Corpus', 'Querying', '15 September 2014', 'September', 'Appendix', 'Speaker Intuitions', 'Speaker', 'English', 'Limbach', 'Englisch', 'In', 'Jugendsprache', 'Steigerungsform', 'Alle', 'Klassen', 'Jugend Gro', 'Wort', 'Sprache', 'Umlaute', 'Nutzen', 'Wie', 'Great Britain', 'Das', 'Befragten', 'Christian Fuchs', 'Berlin', 'Deutschland', 'Limbach', 'Britisches Englisch', 'Beim Zeitunglesen', 'Grossbritannien', 'Woerter', 'Fuer', 'Gro', 'Meist', 'Hin', 'Wort', 'Uberflieger', 'Bedeutung', 'Weg', 'Presse', 'Vor', 'Zusammensetzung', 'Dadurch', 'Bedeutung', 'Wortes', 'Gegenteil', 'Je', 'Zusammenhang', 'Angelika Mohr', 'London', 'Gro', 'Limbach', 'Amerkanisches Englisch', 'The', 'American', 'Recently', 'Relieved', 'American English', 'The', 'That', 'Additionally', 'Rumors', 'Rowling', 'Harry Potter', 'The', 'American', 'American', 'Nazi', 'Aryan', 'This American', 'Nietzschean', 'Robert Keeley', 'Worcester', 'Massachusetts', 'Limbach', 'Appendix', 'English Dictionary', 'Information\nEntries', 'English', 'Definitions', 'America', 'Bill Gates', 'Bohemians\nWord', 'Origin', 'German', 'Collins English', 'Dictionary', 'Pamela Lee', 'Longman Dictionary', 'Contemporary English', 'Full Definition', 'Variants', 'Origin', 'German', 'Old High', 'German', 'Merriam', 'Webster Dictionary', 'German', 'Oxford Dictionary', 'English', 'Appendix', 'Research Results', 'Table', 'Different Word', 'Classes', 'English', 'Table', 'Words Used', 'Most Commonly', 'English\nThe', 'Items', 'The', 'Connotations', 'Oxford Dictionary', 'English', 'Table', 'Different Word', 'Classes', 'German', 'List', 'Items Used', 'Most Commonly', 'German\nThe', 'Appendix', 'German Dictionary', 'Information\nEntry', 'Duden', 'Bildungen', 'Adjektiven', 'Verst', 'Gebrauch', 'Beispiel', 'Bildungen', 'Adjektiven', 'Eigenschaft', 'Beispiel', 'Bildungen', 'Adjektiven', 'Verben', 'Ma', 'Beispiele', 'Bildungen', 'Verben', 'Sache', 'Beispiel', 'Bildungen', 'Verben', 'Bedecken', 'Sicherstrecken', 'Beispiel', 'Bildungen', 'Substantiven', 'Endung', 'Sache', 'Oberseite', 'Beispiel', 'Bildungen', 'Verben', 'Wechseln', 'Stelle', 'Beispiel', 'Bildungen', 'Substantiven', 'Zuviel', 'Beispiel', 'Bildungen', 'Substantiven', 'Beispiel', 'Bildungen', 'Substantiven', 'Figur', 'Sache\nBeispiel', 'Confirmation', 'Authorship', 'All', 'The', 'Internet', 'Violation', 'Marburg', '19 September 2014']
###Markdown
**Exercise 10)**
###Code
sent = ['The', 'dog', 'gave', 'John', 'the', 'newspaper']
print [(w, len(w)) for w in sent]
###Output
[('The', 3), ('dog', 3), ('gave', 4), ('John', 4), ('the', 3), ('newspaper', 9)]
###Markdown
**Exercise 11)**
###Code
raw = 'Tres tristes tigres comen trigo en un trigal.'
raw.split('t')
###Output
_____no_output_____
###Markdown
**Exercise 12)**
###Code
for char in raw[:10]:
print char
###Output
T
r
e
s
t
r
i
s
t
###Markdown
**Exercise 13)**
###Code
raw.split()
raw.split(' ')
sent = 'Tres\ttristes\ttigres\tcomen\ttrigo\ten\tun\ttrigal.'
sent.split()
sent.split(' ')
sent = 'Tres tristes tigres comen trigo en un trigal.'
sent.split()
sent.split(' ')
sent = 'Tres \ttristes\t\t\ttigres\t\t comen\t \t trigo en un trigal.'
sent.split()
sent.split(' ')
###Output
_____no_output_____
###Markdown
**Exercise 14)**
###Code
words = raw.split()
print words
words.sort()
print words
words = raw.split()
sorted(words)
words
# .sort() changes original list, sorted() returns new list
###Output
_____no_output_____
###Markdown
**Exercise 15)**
###Code
'3' * 7
3 * 7
int('3') * 7
str(3) * 7
###Output
_____no_output_____
###Markdown
**Exercise 16)**
###Code
montyTest  # raises a NameError here: the name is only defined after the import below
from test import montyTest
montyTest
import test
test.montyTest
###Output
_____no_output_____
###Markdown
**Exercise 17)**
###Code
words = ['some', 'superexcitingly', 'long', 'example', 'words']
for w in words:
print '%6s' % w,
for w in words:
print '%-6s' % w,
for w in words:
print '%6s' % w
###Output
some
superexcitingly
long
example
words
###Markdown
**Exercise 18)**
###Code
myCorpus = load('corpus')
tokens = nltk.wordpunct_tokenize(myCorpus)
whWords = [w for w in tokens if w.startswith('wh') or w.startswith('Wh')]
print whWords[:50]
print sorted(set(whWords))
###Output
['What', 'When', 'Whenever', 'Which', 'While', 'Why', 'what', 'whatever', 'when', 'where', 'whereas', 'whether', 'which', 'while', 'white', 'who', 'whole', 'whose', 'why']
###Markdown
**Exercise 19)**
###Code
freqs = open('freqs.txt').readlines()
freqs
splitted = [[line.split()[0], int(line.split()[1])] for line in freqs]
splitted
###Output
_____no_output_____
###Markdown
**Exercise 20)**
###Code
# extracts the topic of the article of the day of given Wikipedia Homepage
def find_topic(url, trigger):
text = urllib.urlopen(url).read()
index = text.rfind(trigger)
text = text[index:]
title_with_markup = re.findall(r'\<b\>.+?\<\/b\>', text)[0]
soup = BeautifulSoup(title_with_markup)
return soup.get_text()
# German Wikipedia:
print find_topic('https://de.wikipedia.org/wiki/Wikipedia:Hauptseite', '<span class="mw-headline" id="Artikel_des_Tages">Artikel des Tages</span>')
# English Wikipedia:
print find_topic('https://en.wikipedia.org/wiki/Main_Page', '<span class="mw-headline" id="From_today.27s_featured_article">From today\'s featured article</span>')
# Danish Wikipedia:
print find_topic('https://da.wikipedia.org/wiki/Forside', '<div style="padding-left: 38px; color:#333;">Ugens artikel</div>')
###Output
Maurice Harold Macmillan
Montreal Laboratory
Jan Palach
###Markdown
**Exercise 21)**
###Code
def unknown(url):
content = getContentFromURL(url)
lowercased = re.findall(r'[\s\(\[\{]([a-z]+)', content)
words = nltk.corpus.words.words()
return [w for w in lowercased if w not in words]
print unknown('https://en.wikipedia.org/wiki/Main_Page')
# derived forms, abbreviations, foreign words
###Output
_____no_output_____
###Markdown
**Exercise 22)**
###Code
print unknown('http://news.bbc.co.uk/')
def unknown(url):
text = urllib.urlopen(url).read()
text = re.sub(r'\<script(?:.|\n)*?\<\/script\>', '', text)
text = re.sub(r'\<style(?:.|\n)*?\<\/style\>', '', text)
soup = BeautifulSoup(text)
content = soup.get_text()
lowercased = re.findall(r'[\s\(\[\{]([a-z]+)', content)
words = nltk.corpus.words.words()
return set([w for w in lowercased if w not in words])
print unknown('http://www.bbc.com/news')
###Output
set([u'senators', u'named', u'videos', u'resigns', u'hats', u'crimes', u'hairstyle', u'officers', u'arriving', u'retires', u'punched', u'farewells', u'allowed', u'cows', u'choices', u'birds', u'issues', u'languages', u'tv', u'alerts', u'girls', u'alleged', u'comments', u'children', u'parties', u'filming', u'buns', u'condemns', u'actors', u'helps', u'descended', u'memes', u'has', u'decades', u'aliens', u'shows', u'kicked', u'haveyoursay', u'seconds', u'dies', u'approves', u'earlier', u'grossing', u'robots', u'hours', u'allowing', u'sites', u'scores', u'remarks', u'died', u'hits', u'migrants', u'turbans', u'sanctions', u'charges', u'introduces', u'believes', u'overworked', u'steps', u'temperatures', u'shares', u'minutes', u'finds', u'launched', u'inbox'])
###Markdown
**Exercise 23)**
###Code
sample_text = "I don't hate regular expressions."
nltk.regexp_tokenize(sample_text, r'n\'t|\w+')
# doesn't work because of greediness of operators -> don matches \w+
print nltk.regexp_tokenize(sample_text, r'\w+(?=n\'t)|n\'t|\w+')
print nltk.regexp_tokenize('It doesn\'t split donald.', r'\w+(?=n\'t)|n\'t|\w+') # ?= lookahead assertion
###Output
['I', 'do', "n't", 'hate', 'regular', 'expressions']
['It', 'does', "n't", 'split', 'donald']
###Markdown
**Exercise 24)**
###Code
def encode(text):
text = text.lower();
trans = [('ate', '8'), ('e', '3'), ('i', '1'), ('o', '0'), ('l', '|'), ('s', '5'), ('\.', '5w33t!')]
for (key, value) in trans:
text = re.sub(key, value, text)
return text
print encode('Hello World!')
print encode('It is getting late.')
def encode_enhanced(text):
text = text.lower();
trans = [('ate', '8'), ('e', '3'), ('i', '1'), ('o', '0'), ('l', '|'), ('^s|(?<=\s)s', '$'), ('s', '5'), ('\.', '5w33t!')]
#?<= lookbehind assertion
for (key, value) in trans:
text = re.sub(key, value, text)
return text
encode_enhanced('Should treat sea different from ass.')
###Output
_____no_output_____
###Markdown
**Exercise 25)**
###Code
# a
def piginizeWord(word):
cons = re.findall(r'^[^aeiouAEIOU]*', word)
return word[len(cons[0]):] + cons[0] + 'ay'
piginizeWord('string')
# b
def piginizeText(text):
def helper(matchObj):
return piginizeWord(matchObj.group(0))
return re.sub(r'[A-Za-z]+', helper, text)
piginizeText('Some quiet string here that should be converted to Pig Latin at once.')
# c
def piginizeWordImproved(word):
cons = re.findall(r'^[^aeiouAEIOU]+(?=y)|^[^aeiouqAEIOUQ]*(?:qu)?(?:Qu)?[^aeiouqAEIOUQ]*', word)[0]
remainder = word[len(cons):]
if (word.istitle()):
return remainder.title() + cons.lower() + 'ay'
return remainder + cons + 'ay'
def piginizeText(text):
def helper(matchObj):
return piginizeWordImproved(matchObj.group(0))
return re.sub(r'[A-Za-z]+', helper, text)
piginizeText('My quiet yellow stylish string that should be converted to Pig Latin at once.')
###Output
_____no_output_____
###Markdown
**Exercise 26)**
###Code
text = urllib.urlopen('https://tr.wikipedia.org/wiki/%C4%B0stanbul').read()
text = re.sub(r'\<script(?:.|\n)*?\<\/script\>', '', text)
text = re.sub(r'\<style(?:.|\n)*?\<\/style\>', '', text)
soup = BeautifulSoup(text)
content = soup.get_text()
tokens = nltk.wordpunct_tokenize(content)
text = nltk.Text(tokens)
words = [w.lower() for w in text]
vowel_sequences = []
for word in words:
vowels = ''.join(re.findall(r'[aeiou]', word))
if (len(vowels) > 0):
vowel_sequences.append(vowels)
print vowel_sequences[:50]
bigrams = []
for vowel_seq in vowel_sequences:
count = 0
while (count + 1 < len(vowel_seq)):
bigrams.append((vowel_seq[count], vowel_seq[count + 1]))
count += 1
print bigrams[:50]
vowels = ['a', 'e', 'i', 'o', 'u']
cfd = nltk.ConditionalFreqDist(bigrams)
cfd.conditions()
cfd.tabulate(conditions=vowels,samples=vowels)
###Output
a e i o u
a 4813 786 1975 355 880
e 506 1805 3821 245 207
i 1658 2884 2539 297 59
o 1048 215 221 287 672
u 900 154 174 50 860
###Markdown
**Exercise 27)**
###Code
import random
def laugh():
raw = ''.join(random.choice('aehh ') for x in range(500))
return ' '.join(raw.split())
laugh()
###Output
_____no_output_____
###Markdown
**Exercise 28)**
###Code
# three words -> would be compatible with splitting on whitespace
# one compound word -> would make sense semantically, may be relevant for natural language understanding applications
# nine words -> would make sense phonetically, relevant for speech processing applications
###Output
_____no_output_____
###Markdown
**Exercise 29)**
###Code
def ari(category):
words = nltk.corpus.brown.words(categories=category)
sents = nltk.corpus.brown.sents(categories=category)
av_wordlength = sum(len(w) for w in words) / len(words)
av_sentlength = sum(len(s) for s in sents) / len(sents)
return (4.71 * av_wordlength) + (0.5 * av_sentlength) - 21.43
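# For reference (standard definition of the Automated Readability Index, added
# as a clarifying note):
# ARI = 4.71 * (characters per word) + 0.5 * (words per sentence) - 21.43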
print ari('lore')
print ari('learned')
print ari('government')
print ari('romance')
###Output
10.2547561971
11.9260070433
12.084303495
4.34922419804
###Markdown
**Exercise 30)**
###Code
porter = nltk.PorterStemmer()
lancaster = nltk.LancasterStemmer()
text = 'New rules allowing Sikh police officers to wear turbans instead of traditional police hats have been introduced in New York, officials say. The New York Police Department said the turbans must be navy blue and have the NYPD insignia attached. Under the new rules, religious members of the force are also permitted to grow beards up to half-an-inch long. Sikh officers have until now worn turbans under their caps. Beards have not been permitted.'
tokens = nltk.wordpunct_tokenize(text)
print [porter.stem(t) for t in tokens]
print '\n\n'
print [lancaster.stem(t) for t in tokens]
# Porter preserves upper case, uses unicode, seems to tend to longer stems
###Output
_____no_output_____
###Markdown
**Exercise 31)**
###Code
saying = ['After', 'all', 'is', 'said', 'and', 'done', ',', 'more', 'is', 'said', 'than', 'done', '.']
lengths = []
for w in saying:
lengths.append(len(w))
lengths
###Output
_____no_output_____
###Markdown
**Exercise 32)**
###Code
silly = 'newly formed bland ideas are inexpressible in an infuriating way'
# a
bland = silly.split()
print bland
# b
''.join(w[1] for w in bland)
# c
' '.join(bland)
# d
for w in sorted(bland):
print w
###Output
an
are
bland
formed
ideas
in
inexpressible
infuriating
newly
way
###Markdown
**Exercise 33)**
###Code
# a
'inexpressible'.index('re')
# b
words = ['this', 'is', 'a', 'dull', 'list', 'of', 'words']
words.index('dull')
# c
bland[:bland.index('in')]
###Output
_____no_output_____
###Markdown
**Exercise 34)**
###Code
def convertNationality(adjective):
if (adjective.endswith('dian') or adjective.endswith('ese')):
return adjective[:-3] + 'a'
elif (adjective.endswith('ian')):
return adjective[:-1]
print convertNationality('Canadian')
print convertNationality('Australian')
print convertNationality('Chinese')
###Output
Canada
Australia
China
###Markdown
**Exercise 35)**
###Code
pronouns = ['I', 'you', 'he', 'she', 'it', 'we', 'they']
corpus = ' '.join(nltk.corpus.webtext.words())
sample1 = re.findall(r'[aA]s best as (?:I|you|he|she|it|we|they) can', corpus)
print sample1[:10]
print len(sample1)
sample2 = re.findall(r'[aA]s best (?:I|you|he|she|it|we|they) can', corpus)
print sample2[:10]
print len(sample2)
###Output
[]
0
[u'as best you can']
1
###Markdown
**Exercise 36)**
###Code
print ' '.join(nltk.corpus.genesis.words('lolcat.txt')[:500])
def lolcat(word):
word = re.sub(r'ight', 'iet', word)
word = re.sub(r'^I$', 'ai', word)
word = re.sub(r'(?<=[^aeiouAEIOU])i$', 'ai', word)
word = re.sub(r'le$', 'el', word)
def helper(matchObj):
return 'e' + matchObj.group(1)
word = re.sub(r'([^aeiouAEIOU])e$', helper, word)
word = re.sub(r'(?<=[^aeiouAEIOU])er$', 'ah', word)
word = re.sub(r'ou', 'ow', word)
word = re.sub(r'Ou', 'Ow', word)
word = re.sub(r'(?<=[^aeiouAEIOU])y$', 'eh', word)
word = re.sub(r'th', 'f', word)
word = re.sub(r'Th', 'F', word)
word = re.sub(r'ing$', 'in', word)
return word
print lolcat('I')
print lolcat('hi')
print lolcat('right')
print lolcat('kite')
print lolcat('like')
print lolcat('over')
print lolcat('loud')
print lolcat('kitty')
print lolcat('three')
print lolcat('nothing')
print lolcat('little')
###Output
ai
hai
riet
kiet
liek
ovah
lowd
kitteh
free
nofin
littel
###Markdown
**Exercise 37)**
###Code
help(re.sub)
def clean(html):
# remove html tags:
text = re.sub(r'\<.*?\>', '', html)
# normalize whitespace:
text = re.sub(r'\s+', ' ', text)
return text
clean('<span class="some class">A span which should<br> be cleaned</span>')
###Output
_____no_output_____
###Markdown
**Exercise 38)**
###Code
# a
text = 'some text with long-\nterm and encyclo-\npedia'
words = re.findall(r'\w+\-\n\w+', text)
words
# b
for w in words:
print re.sub('\n', '', w)
# c
for w in words:
word = re.sub('\n', '', w)
parts = word.lower().split('-')
if (parts[0] not in nltk.corpus.words.words() and parts[1] not in nltk.corpus.words.words()):
print re.sub('\-', '', word)
else:
print word
###Output
long-term
encyclopedia
###Markdown
**Exercise 39)**
###Code
def soundex(name):
first = name[0]
# remove w & h
encoded = first.lower() + re.sub('[wh]', '', name[1:].lower())
# replace consonants with numbers
encoded = re.sub(r'[bfpv]', '1', encoded)
encoded = re.sub(r'[cgjkqsxz]', '2', encoded)
encoded = re.sub(r'[dt]', '3', encoded)
encoded = re.sub(r'l', '4', encoded)
encoded = re.sub(r'[mn]', '5', encoded)
encoded = re.sub(r'r', '6', encoded)
# merge adjacent same digits into one
count = 1
while count < 7:
encoded = re.sub(str(count) + '{2,}', str(count), encoded)
count += 1
# remove vowels
encoded = encoded[0].upper() + re.sub('[aeiouy]', '', encoded[1:])
# if first character is digit, replace it with the saved letter
if (encoded[0].isdigit()):
encoded = first.upper() + encoded[1:]
# encoded must contain 3 digits -> fill it up with zeros if too short
if (len(encoded) < 4):
encoded += '000'
return encoded[:4]
print soundex('Robert') #R163
print soundex('Rupert') #R163
print soundex('Rubin') #R150
print soundex('Ashcraft') #A261
print soundex('Ashcroft') #A261
print soundex('Tymczak') #T522
print soundex('Pfister') #P236
###Output
R163
R163
R150
A261
A261
T522
P236
###Markdown
**Exercise 40)**
###Code
def ari(raw):
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sents = sent_tokenizer.tokenize(raw)
words = nltk.word_tokenize(raw)
av_wordlength = sum(len(w) for w in words) / len(words)
    av_sentlength = sum(len(s) for s in sents) / len(sents)  # note: sents are raw strings here, so this measures sentence length in characters (not words), which inflates the scores below
return (4.71 * av_wordlength) + (0.5 * av_sentlength) - 21.43
print ari(nltk.corpus.abc.raw("rural.txt"))
print ari(nltk.corpus.abc.raw("science.txt"))
###Output
68.5700668936
68.8154763376
###Markdown
**Exercise 41)**
###Code
words = ['attribution', 'confabulation', 'elocution', 'sequoia', 'tenacious', 'unidirectional']
# more elegant with regular expression instead of nested list comprehension:
vsequences = set([''.join(re.findall(r'[aeiou]', word)) for word in words])
sorted(vsequences)
# nested list comprehension:
vsequences = set([''.join([char for char in word if char in 'aeiou']) for word in words])
sorted(vsequences)
###Output
_____no_output_____
###Markdown
**Exercise 42)**
###Code
from nltk.corpus import wordnet as wn
class IndexedText(object):
def __init__(self, stemmer, text):
self._text = text
self._stemmer = stemmer
self._index = nltk.Index((self._stem(word), i)
for (i, word) in enumerate(text))
def concordance(self, word, width=40):
key = self._stem(word)
wc = int(width/4) # words of context
for i in self._index[key]:
lcontext = ' '.join(self._text[i-wc:i])
rcontext = ' '.join(self._text[i:i+wc])
offset = '(WordNet Offset: ' + str(wn.synsets(self._text[i])[0].offset()) + ')'
ldisplay = '%*s' % (width, lcontext[-width:])
rdisplay = '%-*s' % (width, rcontext[:width])
print ldisplay, rdisplay, offset
def _stem(self, word):
return self._stemmer.stem(word).lower()
porter = nltk.PorterStemmer()
grail = nltk.corpus.webtext.words('grail.txt')
text = IndexedText(porter, grail)
text.concordance('lie')
###Output
r king ! DENNIS : Listen , strange women lying in ponds distributing swords is no (WordNet Offset: 751944)
beat a very brave retreat . ROBIN : All lies ! MINSTREL : [ singing ] Bravest of (WordNet Offset: 6756831)
Nay . Nay . Come . Come . You may lie here . Oh , but you are wounded ! (WordNet Offset: 6756831)
doctors immediately ! No , no , please ! Lie down . [ clap clap ] PIGLET : Well (WordNet Offset: 6756831)
ere is much danger , for beyond the cave lies the Gorge of Eternal Peril , which (WordNet Offset: 6756831)
you . Oh ... TIM : To the north there lies a cave -- the cave of Caerbannog -- (WordNet Offset: 6756831)
h it and lived ! Bones of full fifty men lie strewn about its lair . So , brave k (WordNet Offset: 6756831)
not stop our fight ' til each one of you lies dead , and the Holy Grail returns t (WordNet Offset: 6756831)
###Markdown
**Exercise 43)**
###Code
def guessLanguage(text):
    # Guess the language of `text` by comparing its word frequency ranking
    # against each Latin-1 UDHR corpus via the Spearman rank correlation.
    tokens = nltk.wordpunct_tokenize(text)
text = nltk.Text(tokens)
fdist_text = nltk.FreqDist(text)
best_guess = ('', 0)
best_intersection = []
for lang in nltk.corpus.udhr.fileids():
if (lang.endswith('-Latin1')):
fdist_lang = nltk.FreqDist(nltk.corpus.udhr.words(lang))
intersection = list(set(fdist_text.keys()) & set(fdist_lang.keys()))
dict_text = []
dict_lang = []
for word in intersection:
dict_text.append((word, fdist_text[word]))
dict_lang.append((word, fdist_lang[word]))
spearman = nltk.spearman_correlation(dict_text, dict_lang)
if ((best_guess[1] == 0 and spearman != 0.0) or (spearman != 0.0 and spearman > best_guess[1])):
best_guess = (lang[:-7], spearman)
return best_guess[0];
help(nltk.spearman_correlation)
print guessLanguage('This is clearly an example of English text which should not be hard to recognize.')
print guessLanguage(u'Carapax (von gr. charax „Befestigungsanlage“, „Palisade“ und pagios „fest“; Plural: Carapaces) ist eine Bezeichnung für eine bei verschiedenen Tiergruppen (Taxa) unabhängig voneinander entstandene harte Bedeckung der Körperoberseite. Bei Schildkröten heißt der Carapax gemeinsprachlich Rückenschild oder Rückenpanzer, bei Krustentieren (Krebstieren in der Küche) ist er ein Teil der „Schale“. Viele Krebstiere (Crustacea) besitzen eine Hautfalte, die vom Kopfhinterrand (Segment der 2. Maxille) ausgeht; diese kann auch primär (z. B. Cephalocarida) oder sekundär (z. B. Asseln und Flohkrebse) fehlen, gehört also nicht zum Grundbauplan der Krebstiere. Vielfach ist die chitinöse Kopffalte durch eingelagerten Kalk panzerartig versteift, vor allem bei vielen Zehnfußkrebsen. Bedeckt diese Struktur als Rückenschild einige oder ggf. alle Rumpfsegmente, wird sie Carapax genannt. Der Carapax schließt also an den Kopf an, setzt sich über dessen Hinterrand hinaus fort und erstreckt sich mehr oder weniger weit über den Rumpf des Krebses. Je nach Ausbildung kann er auch den Kopf selbst umhüllen (z. B. bei den Muschelkrebsen) und mehr oder weniger weit auch seitlich herabgezogen sein. – Zum Artikel …')
print guessLanguage(u'Dødsstraf eller livsstraf er henrettelse som straf for en forbrydelse. I de jurisdiktioner, der praktiserer dødsstraf, er den som regel forbeholdt et lille antal alvorlige forbrydelser, ofte overlagt mord og landsforræderi. I Kina praktiseres tillige dødsstraf for økonomisk kriminalitet og narkokriminalitet, og i Iran for homoseksualitet, ligesom der i visse områder kontrolleret af islamiske oprørsbevægelser gennemføres henrettelser baseret på en streng fortolkning af sharia. Mange lande har dødsstraf i den militære straffelov eller for forbrydelser begået i krigstid. I Danmark blev dødsstraf første gang afskaffet i den borgerlige straffelov den 15. april 1930. Loven trådte i kraft 15. april 1933. Dødsstraf blev på dette tidspunkt beholdt i den militære straffelov. I forbindelse med retsopgøret efter 2. verdenskrig genindførtes dødsstraffen (som kaldtes livsstraf) i 1945 for forbrydelser begået under besættelsen. Loven var en særlov og kendes som Landsforræderloven eller retteligen Straffelovstillægget og havde tilbagevirkende kraft for handlinger begået efter 9. april 1940. 46 personer blev på den baggrund henrettet af frivillige politifolk. Den 20. juli 1950 kl. 01:00 blev Ib Birkedal Hansen henrettet som den sidste i Danmark. (Læs mere..)')
###Output
Danish_Dansk
###Markdown
**Exercise 44)**
###Code
def novel_sense(word, text):
content_words = []
stopwords = nltk.corpus.stopwords.words('english')
count = 0
for w in text:
if (w.isalpha() and w not in stopwords):
content_words.append((w, count))
count += 1
count = 0
oddest = False
for w in content_words:
if (w[0] == word):
count_comparisons = 0
overall_sim = 0
for synset in wn.synsets(w[0]):
# compare to words in context on left side:
for index in range(1, min(21, count+1)):
context_word = content_words[count - index][0]
for context_synset in wn.synsets(context_word):
path_sim = synset.path_similarity(context_synset)
if (path_sim != None):
overall_sim += path_sim
count_comparisons += 1
# compare to words in context on right side:
for index in range(1, min(21, len(content_words)-count-1)):
context_word = content_words[count + index][0]
for context_synset in wn.synsets(context_word):
path_sim = synset.path_similarity(context_synset)
if (path_sim != None):
overall_sim += path_sim
count_comparisons += 1
av_sim = overall_sim / count_comparisons
if (oddest == False or oddest[1] > av_sim):
oddest = (w[1], av_sim) # w[1] = original index of the word in the text
count += 1
if (oddest != False):
print text[max(0, oddest[0] - 50):min(oddest[0] + 50, len(text))]
print 'Average Similarity: ', str(oddest[1])
novel_sense('love', nltk.corpus.gutenberg.words('austen-emma.txt'))
###Output
[u'Jane', u'Fairfax', u'therefore', u'that', u'he', u'would', u'have', u'preferred', u'the', u'society', u'of', u'William', u'Larkins', u'.', u'No', u'!--', u'she', u'was', u'more', u'and', u'more', u'convinced', u'that', u'Mrs', u'.', u'Weston', u'was', u'quite', u'mistaken', u'in', u'that', u'surmise', u'.', u'There', u'was', u'a', u'great', u'deal', u'of', u'friendly', u'and', u'of', u'compassionate', u'attachment', u'on', u'his', u'side', u'--', u'but', u'no', u'love', u'.', u'Alas', u'!', u'there', u'was', u'soon', u'no', u'leisure', u'for', u'quarrelling', u'with', u'Mr', u'.', u'Knightley', u'.', u'Two', u'days', u'of', u'joyful', u'security', u'were', u'immediately', u'followed', u'by', u'the', u'over', u'-', u'throw', u'of', u'every', u'thing', u'.', u'A', u'letter', u'arrived', u'from', u'Mr', u'.', u'Churchill', u'to', u'urge', u'his', u'nephew', u"'", u's', u'instant', u'return', u'.', u'Mrs']
Average Similarity: 0.12389011066
|
09-DataVisualization.ipynb | ###Markdown
Data Visualization with PythonData Visualization might mean a lot of different things. Here we are only going to deal with basic 2D plotting, using Matplotlib, which is a library for plotting with Python. Matplotlib is very well supported, with examples that you can use to figure out how it works. Here, we will run through a quick intro to using matplotlib, with examples adapted from the official materials. Data Visualization is a way to 'see' your data, for data exploration and data communication. Check out matplotlib's official website, including their introductory tutorial, and list of examples. Pyplot vs PylabNote that matplotlib is an old, and quite massive, module, and actually has a couple of different ways to use it. Short version: importing 'matplotlib.pyplot', like we do here, is perhaps the most common, and will work! Long version: follow the link to learn a bit about the different ways in which matplotlib can be used (pyplot, pylab, etc.). Ultimately, it all calls the same underlying code; there is just different syntax for accessing everything. A description of the different ways to interact with matplotlib is available here.
###Code
# Import matplotlib - the main python plotting package
import matplotlib.pyplot as plt
# Import numpy functions for generating test data to plot
import numpy as np
from numpy.random import rand
# This magic command plots figures directly in the notebook
%matplotlib inline
# This sets a higher resolution for figures
%config InlineBackend.figure_format = 'retina'
###Output
_____no_output_____
###Markdown
Line graph
###Code
# Let's create some fake data to plot
x = np.arange(0, 11)
y = x**2
# In matplotlib, we can make a figure and then plot to it
# Make a figure
f = plt.figure()
# To plot, simply pass the x and y data to the plot function
plt.plot(x, y)
###Output
_____no_output_____
###Markdown
Without any other information, matplotlib will add a number of plotting attributes by default. For example, by default we get lines around the plot, tick marks, as well as axis number labels. We can customize all of these things, and add more stuff to the plot as well. Scatter PlotLet's try creating a scatter plot. Let's imagine two groups of data that we want to plot together on a scatter plot to compare.
###Code
# Create some Data
n = 50 # n is the number of data points
x = rand(n) # Randomly create x data points
y1 = rand(n) # Randomly create 1st group of y data points
y2 = rand(n) # Randomly create 2nd group of y data points
###Output
_____no_output_____
###Markdown
Creating a Scatter PlotThe 'scatter' command works about the same as the plot command, but makes a scatter plot instead of a line. Here, we're adding another argument, 'c', which specifies the colour of the points. There are lots of optional arguments we can add to 'plot' and 'scatter'.
###Code
# Plot the first set of data
plt.scatter(x, y1, c='blue')
# We now want to plot the second set of data on the same plot.
# Matplotlib keeps drawing onto the current figure by default, so no extra
# 'hold' call is needed (plt.hold is deprecated, and the bare attribute
# access that used to be here did nothing anyway).
# Plot the second set of data
plt.scatter(x, y2, c='red')
###Output
_____no_output_____
###Markdown
We now have a scatter plot! However, it doesn't look very nice. Let's see if we can improve it by experimenting with some of the customization functions to make a nicer graph.
###Code
# Create a new figure
fig = plt.figure()
# Add an axes to our figure
ax = plt.axes()
# Plot the data again
ax.scatter(x, y1, c='blue', alpha=0.6, label='Group 1', s=50)
ax.scatter(x, y2, c='red', alpha=0.6, label='Group 2', s=50)
# Here we've added some more optional arguments.
# alpha - sets the transparency of the data points
# label - makes a label for the data we've plotted, which can be used in the legend
# s - size. Changes the size of the dots we plot
# Add a title to our graph
plt.title('My Awesome Scatter Plot', fontsize=18, fontweight='bold')
# Add data labels
plt.xlabel('X Data Label', fontsize=16, fontweight='bold')
plt.ylabel('Y Data Label', fontsize=16, fontweight='bold')
# Set the ranges to plot of the x and y variables
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
# Set the tick labels
ax.set_xticks(np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]))
ax.set_yticks(np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]))
# Turn the top and right side lines off
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Set the tick marks to only be on the bottom and the left.
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# Set linewidth of remaining spines
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
# Add a legend. This will use the labels you defined when you set the data.
plt.legend(loc='upper left', scatterpoints=1, frameon=False)
# Legend doesn't require any arguments.
# Here we optionally specify where to put the legend ('loc'), how many points to use
# in the legend ('scatterpoints'), and wether to have a box around the legend ('frameon')
###Output
_____no_output_____
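###Markdown
To keep a customized figure around outside the notebook, it can be written to disk. A minimal sketch using matplotlib's standard savefig call (the filename here is just an example):
###Code
# Save the customized figure to a PNG file
fig.savefig('my_awesome_scatter.png', dpi=150, bbox_inches='tight')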
###Markdown
Figures and AxesNote above that sometimes we are calling these functions on 'ax' and sometimes we are calling with 'plt'. We also created a thing called fig. What's all this?- 'plt' is just our nickname for the matplotlib module - Here, whenever we are using 'plt' we are implicitly calling a function from matplotlib on the current figure- 'fig' is just an arbitrary variable name we have been using to assign to a figure object. - We can use 'fig' (or whatever we call our figure) to update it after we have created it- 'ax' is also just an arbitrary variable name for the current axes. - We can use 'ax' (or whatever we call our axes) to update a particular axis after we have created it
###Code
# 'fig' is a label for the figure we are working on.
# gcf() is a way to find the current figure.
print(type(fig)) # Figure is an object in matplotlib
print(fig) # This is the figure handle 'fig' we made before
print(plt.gcf(), '\n') # gcf grabs the current figure. In this case, current figure is same as 'fig'
# 'ax' is a name for the current axes. A figure can have many axes (figures can have subplots)
print(type(ax)) # Axes is a class of variable in matplotlib
print(ax) # This is the axes handle 'ax' that we made before
#print(plt.gca(), '\n') # gca grabs the current axes.
# In this case, current axis is 'ax'. Turned off as it plots an axis.
# Note that here gca is drawing an empty axis
###Output
<class 'matplotlib.axes._subplots.AxesSubplot'>
Axes(0.125,0.125;0.775x0.755)
###Markdown
Keeping track of figures and axes, and knowing what to call on what, can be a bit confusing at first. Note that a lot of it gets managed behind the scenes - you don't have to explicitly create new figures or axes. Matplotlib will make new figures and axes when it needs to, without explicitly being told to. If you're wondering why we would bother using fig and ax then: - We often don't need to, but it can be very useful to have a label to grab our figure (and manipulate it) when we need to. For example, we can get our figure back just by calling its name.
###Code
# Redraw figure with 'fig' variable name
fig
###Output
_____no_output_____
###Markdown
Data VisualizationData Visualization can refer to a lot of different things. Here, we will start with making static 2D visualizations of data. To do so, we will use the `matplotlib` package. Matplotlib is a large and well supported package that forms the basis of a lot of plotting in Python. Data Visualization is a way to 'see' your data, for data exploration and data communication. Check out the matplotlib website, including their introductory tutorial, and gallery of examples.
###Code
# Import matplotlib - the main python plotting package
import matplotlib.pyplot as plt
# Import numpy functions for generating test data to plot
import numpy as np
from numpy.random import rand
# This magic command plots figures directly in the notebook
%matplotlib inline
# This sets a higher resolution for figures
%config InlineBackend.figure_format = 'retina'
###Output
_____no_output_____
###Markdown
Line graphFirst, we will create a simple line graph.
###Code
# Let's create some fake data to plot
x = np.arange(0, 11)
y = x**2
# To plot, simply pass the x and y data to the plot function
plt.plot(x, y)
###Output
_____no_output_____
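###Markdown
As a quick aside, the plot call also accepts optional styling arguments. A minimal sketch, reusing the x and y defined above, passing a color/linestyle format string and a linewidth:
###Code
# Same data, restyled: red dashed line, slightly thicker
plt.plot(x, y, 'r--', linewidth=2)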
###Markdown
Without any other information, matplotlib will add a number of plotting attributes by default. For example, by default we get lines around the plot, tick marks, and axis number labels. We can customize all of these things, and add more stuff to the plot as well. Scatter PlotNext, let's try creating a scatter plot. To do so, we can simulate two groups of data that we want to plot together on a scatter plot to compare.
###Code
# Create some Data
n = 50 # n is the number of data points
x = rand(n) # Randomly create x data points
y1 = rand(n) # Randomly create 1st group of y data points
y2 = rand(n) # Randomly create 2nd group of y data points
###Output
_____no_output_____
###Markdown
Creating a Scatter PlotThe 'scatter' command works about the same as the plot command, but makes a scatter plot instead of a line. Here, we're adding another argument, `color`, which specifies the color of the points. Note there are lots of optional arguments we can add to 'plot' and 'scatter' that we will explore more later.
###Code
# Plot the first set of data
plt.scatter(x, y1, color='blue')
# Now, plot both sets of data together
# We can do this by calling the plot call on each set of data
# Subsequent plot calls, like this one, will by default plot onto the same figure
plt.scatter(x, y1, color='blue')
plt.scatter(x, y2, color='red')
###Output
_____no_output_____
###Markdown
We now have a scatter plot! Customizing PlotsThe plot above shows the data, but aesthetically there is more we could do to make it look nicer. Next up, we will replot the data and add some customization to the plot. The next cell is a large set of code, but to explore how it all works, work through it bit by bit, try passing in different values, and see what that does to the resulting plot.
###Code
# Create a new figure
# In this case we are explicitly creating the figure that we will plot to
fig = plt.figure()
# Add an axes to our figure
# Figures can have multiple axes. This adds a single new axes to our figure
ax = plt.axes()
# Plot the data again
ax.scatter(x, y1, color='blue', alpha=0.6, label='Group 1', s=50)
ax.scatter(x, y2, color='red', alpha=0.6, label='Group 2', s=50)
# Here we've added some more optional arguments:
# alpha - sets the transparency of the data points
# label - makes a label for the data we've plotted, which can be used in the legend
# s (size) - changes the size of the dots we plot
# Add a title to our graph
plt.title('My Awesome Scatter Plot', fontsize=18, fontweight='bold')
# Add data labels
ax.set_xlabel('X Data Label', fontsize=16, fontweight='bold')
ax.set_ylabel('Y Data Label', fontsize=16, fontweight='bold')
# Set the ranges to plot of the x and y variables
ax.set_xlim([-0.1, 1.1])
ax.set_ylim([-0.1, 1.1])
# Set the tick labels
ax.set_xticks(np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]))
ax.set_yticks(np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]))
# Turn the top and right side lines off
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Set the tick marks to only be on the bottom and the left.
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
# Set linewidth of remaining spines
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
# Add a legend. This will use the labels you defined when you set the data.
ax.legend(loc='upper left', scatterpoints=1, frameon=False)
# Note that legend doesn't require any arguments
# Here we are optionally specifying:
# 'loc' - where to put the legend
# 'scatterpoints' - how many points to show in the legend
# 'frameon' - whether to have a box around the legend
###Output
_____no_output_____
###Markdown
Figures and AxesNote that in the above example, we defined a figure object, `fig`, and an axes object, `ax`. You might also notice that sometimes we called functions from `plt`, and sometimes called methods directly on the `ax` object. So, what are these different things?- `plt` is the name we have given the imported matplotlib module - Here, whenever we are using 'plt' we are calling a function from matplotlib - By default, this gets applied to the current figure (the most recent one created)- `fig` is a variable name we have given to the figure object - A figure object is the whole figure that we are creating - We can use 'fig' (or whatever we call our figure) to access or update our figure after we have created it- `ax` is also a variable name, for the current axes - A figure can have multiple axes (though our figure above only has one) - To update a value on an axes object, you can call a `set_` method on the axes object, like we do above
###Code
# 'fig' is a label for the figure we are working on.
# gcf() is a way to find the current figure.
print(type(fig)) # Figure is an object in matplotlib
print(fig) # This is the figure handle 'fig' we made before
print(plt.gcf(), '\n') # gcf grabs the current figure. In this case, current figure is same as 'fig'
# 'ax' is a name for the current axes. A figure can have many axes (figures can have subplots)
print(type(ax)) # Axes is a class of variable in matplotlib
print(ax) # This is the axes handle 'ax' that we made before
# Note that if you need to grab the current axes, you can do so with `plt.gca()`
###Output
<class 'matplotlib.axes._subplots.AxesSubplot'>
AxesSubplot(0.125,0.125;0.775x0.755)
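###Markdown
A figure can also be created together with several axes in one call. A minimal sketch using matplotlib's standard subplots API (the variable names fig2, ax1, ax2 are just for illustration), plotting the two groups side by side:
###Code
# One figure containing two axes, side by side
fig2, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ax1.scatter(x, y1, color='blue')
ax2.scatter(x, y2, color='red')
ax1.set_title('Group 1')
ax2.set_title('Group 2')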
###Markdown
Keeping track of figures and axes can be a bit confusing at first. Note that typically a lot of matplotlib object management happens automatically. In many cases, for example when making many figures across different cells, matplotlib will make new figures and axes when it needs to, without you having to explicitly specify this. Defining or accessing figure and axes objects can be useful when customizing plots, replotting things later, or for more custom or complex plotting tasks. That is, it can be useful to have a label to grab our figure, and manipulate it, if and when we need to. For example, we can get our figure back just by calling the figure object name.
###Code
# Redraw figure with 'fig' variable name
fig
###Output
_____no_output_____ |
Mini_Data_Anal/Dacon_project_notebook.ipynb | ###Markdown
plt.figure(figsize=(20,4))
plt.title('revenue_ratio')
sns.countplot(alldata['revenue_ratio'], hue=alldata['OC'])
# Visualize open/closed status by hospital type -- not very helpful for the analysis

plt.figure(figsize=(20,6))
sns.countplot(alldata['profit1_-_profit2'], hue=alldata['OC'])
# Check whether a change of management affects open/closed status -- little impact on the analysis

plt.figure(figsize=(20,6))
sns.countplot(alldata['debt_total_ratio'], hue=alldata['OC'])
# Compare revenue across hospitals

plt.figure(figsize=(15,7))
sns.boxplot( alldata['opr'] ,hue=alldata['OC'])

le = LabelEncoder()
le_col = ['sido','instkind','ownerChange']
for i in le_col:
    alldata2[i]=le.fit_transform(list(alldata2[i]))
alldata2
###Code
alldata2=alldata.fillna(-1)
alldata2
train2=alldata2[:len(train_file)] # splitting by the original lengths works because concat preserved the train/test order
test2=alldata2[len(train_file):]
train3= train2.drop(['OC','inst_id','openDate','sido','sgg','instkind','ownerChange','employee1','employee2'], axis=1)
train2['OC'] = train2['OC'].astype(str) # cast the OC target to string per row (str() on the whole Series would broadcast one value) so LabelEncoder can encode it without errors
le= LabelEncoder()
train2['OC'] =le.fit_transform(train2['OC'])
train2['sido'] = train2['sido'].astype(str) # same treatment for the sido (region) category
le= LabelEncoder()
train2['sido'] =le.fit_transform(train2['sido'])
train2
train3
#alldata['OC']=le.fit_transform(alldata['OC'])
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
from sklearn.model_selection import GridSearchCV
params = { 'n_estimators' : [20] }
grid_cv = GridSearchCV(rf, param_grid=params, n_jobs=-1, cv=5)
grid_cv.fit(train3, train2['OC'])
print('best parameter :', grid_cv.best_params_)
print('best score :', grid_cv.best_score_)
grid_cv.best_estimator_
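# (sketch) the estimator returned by the grid search can be reused directly
# instead of refitting a fresh RandomForestClassifier below, e.g.:
#   best_rf = grid_cv.best_estimator_
#   best_rf.fit(train3, train2['OC'])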
from sklearn.ensemble import RandomForestClassifier
rf=RandomForestClassifier()#n_jobs=-1)
#random_state=42,
#oob_score=True) #random_state pins the results for reproducibility
rf.fit(train3, train2['OC'])
# fix: predict needs the test features; reuse the same columns the model was trained on
result=rf.predict(test2[train3.columns])
a=pd.Series(rf.feature_importances_, index=train3.columns).sort_values(ascending=False)
a
a=pd.Series(rf.feature_importances_, index=train3.columns).sort_values(ascending=False)
plt.figure(figsize=(15,15))
sns.barplot(a, a.index) # inspect the importance of individual columns
result
sub=pd.read_csv('submission_sample.csv')
sub['OC']=result
sub.to_csv('hos_1.csv',index=False)
###Output
_____no_output_____ |
M_accelerate_6lastlayer-8fold_91.41-Copy1.ipynb | ###Markdown
MobileNet - Pytorch Step 1: Prepare data
###Code
# MobileNet-Pytorch
import argparse
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
#from mobilenets import mobilenet
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# Train, Validate, Test. Heavily inspired by Kevinzakka https://github.com/kevinzakka/DenseNet/blob/master/data_loader.py
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
valid_size=0.1
# define transforms
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
# load the dataset
train_dataset = datasets.CIFAR10(root="data", train=True,
download=True, transform=train_transform)
valid_dataset = datasets.CIFAR10(root="data", train=True,
download=True, transform=valid_transform)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train)) # 10% of the 50k training images are held out for validation
np.random.seed(42)# 42
np.random.shuffle(indices) # randomly shuffle [0,1,...,49999]
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx) # sampler that draws only from the given index subset
valid_sampler = SubsetRandomSampler(valid_idx)
###################################################################################
# ------------------------- experiment with different batch sizes ----------------
###################################################################################
show_step=2 # with a large batch size, keep show_step small
max_epoch=80 # maximum number of training epochs
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=256, sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(valid_dataset,
batch_size=256, sampler=valid_sampler)
test_transform = transforms.Compose([
transforms.ToTensor(), normalize
])
test_dataset = datasets.CIFAR10(root="data",
train=False,
download=True,transform=test_transform)
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=256,
shuffle=True)
###Output
Files already downloaded and verified
Files already downloaded and verified
Files already downloaded and verified
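###Markdown
A quick sanity check of the loader output shapes can be useful here. A minimal sketch, assuming the batch size of 256 set above (expect [256, 3, 32, 32] images and [256] labels for CIFAR-10):
###Code
# Pull one batch from the training loader and inspect its shapes
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)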
###Markdown
Step 2: Model Config 32 is downscaled 5 times to 1x1@1024. Earlier draft, from https://github.com/kuangliu/pytorch-cifar
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''Depthwise conv + Pointwise conv'''
    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # number of groups = number of input channels (depthwise)
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        one_conv_kernel_size = 3
        self.conv1D= nn.Conv1d(1, out_planes, one_conv_kernel_size, stride=1,padding=1,groups=1,dilation=1,bias=False) # initialized in __init__
        self.bn2 = nn.BatchNorm2d(out_planes)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        # -------------------------- Attention -----------------------
        w = F.avg_pool2d(x,x.shape[-1]) # better created once in __init__
        print(w.shape) # [bs,in_Channel,1,1]
        w = w.view(w.shape[0],1,w.shape[1]) # [bs,1,in_Channel]
        # one_conv_filter = nn.Conv1d(1, out_channel, one_conv_kernel_size, stride=1,padding=1,groups=1,dilation=1) # initialized in __init__
        # [bs,out_channel,in_Channel]
        w = self.conv1D(w)
        w = 0.5*F.tanh(w) # [-0.5,+0.5]
        # -------------- softmax ---------------------------
        print(w.shape)
        w = w.view(w.shape[0],w.shape[1],w.shape[2],1,1)
        print(w.shape)
        # ------------------------- fusion --------------------------
        out=out.view(out.shape[0],1,out.shape[1],out.shape[2],out.shape[3])
        print("x size:",out.shape)
        out=out*w
        print("after fusion x size:",out.shape)
        out=out.sum(dim=2)
        out = F.relu(self.bn2(out))
        return out
class MobileNet(nn.Module):
    # (128,2) means conv planes=128, conv stride=2, by default conv stride=1
    cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32) # build the layer stack automatically
        self.linear = nn.Linear(1024, num_classes)
    def _make_layers(self, in_planes):
        layers = []
        for x in self.cfg:
            out_planes = x if isinstance(x, int) else x[0]
            stride = 1 if isinstance(x, int) else x[1]
            layers.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
###Code
# 32 is downscaled 5 times to 1x1@1024
# From https://github.com/kuangliu/pytorch-cifar
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block_Attention_HALF(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1):
super(Block_Attention_HALF, self).__init__()
        # number of groups = number of input channels (depthwise convolution)
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
        #------------------------ 'half' variant: plain 1x1 pointwise conv ------------------------------
self.conv2 = nn.Conv2d(in_planes, int(out_planes), kernel_size=1, stride=1, padding=0, bias=False)
#------------------------------------------------------------
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = F.relu6(self.bn1(self.conv1(x)))
# -------------------------- Attention -----------------------
w = F.avg_pool2d(x,x.shape[-1]) #最好在初始化层定义好
#print(w.shape)
# [bs,in_Channel,1,1]
in_channel=w.shape[1]
w=w[0]
#w=torch.randn(w[0].shape).cuda()*0.1
a=torch.randn(1).cuda()*0.1
if a>0.35:
print(w.shape)
print(w)
if a>0.35:
print(self.conv2.weight)
# ------------------------- fusion --------------------------
# conv 1x1
out=self.conv2(out)
        # ----------------------- try it without the relu -------------------------------
out = F.relu6(self.bn2(out))
return out
class Block_Attention(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1):
super(Block_Attention, self).__init__()
# 分组卷积数=输入通道数
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
#self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        one_conv_kernel_size = 17 # [3,7,9]
        self.conv1D= nn.Conv1d(1, out_planes, one_conv_kernel_size, stride=1,padding=8,groups=1,dilation=1,bias=False) # initialized in __init__
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
# -------------------------- Attention -----------------------
w = F.avg_pool2d(x,x.shape[-1]) #最好在初始化层定义好
#print(w.shape)
# [bs,in_Channel,1,1]
in_channel=w.shape[1]
#w = w.view(w.shape[0],1,w.shape[1])
# [bs,1,in_Channel]
        # average over this batch while keeping dim 0
#w= w.mean(dim=0,keepdim=True)
# MAX=w.shape[0]
# NUM=torch.floor(MAX*torch.rand(1)).long()
# if NUM>=0 and NUM<MAX:
# w=w[NUM]
# else:
# w=w[0]
w=w[0]
w=w.view(1,1,in_channel)
# [bs=1,1,in_Channel]
        # one_conv_filter = nn.Conv1d(1, out_channel, one_conv_kernel_size, stride=1,padding=1,groups=1,dilation=1) # initialized in __init__
# [bs=1,out_channel,in_Channel]
w = self.conv1D(w)
# [bs=1,out_channel,in_Channel]
w = 0.5*F.tanh(w) # [-0.5,+0.5]
# [bs=1,out_channel,in_Channel]
w=w.view(w.shape[1],w.shape[2],1,1)
# [out_channel,in_Channel,1,1]
# -------------- softmax ---------------------------
#print(w.shape)
# ------------------------- fusion --------------------------
# conv 1x1
out=F.conv2d(out,w,bias=None,stride=1,groups=1,dilation=1)
out = F.relu(self.bn2(out))
return out
class Block(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1):
super(Block, self).__init__()
# 分组卷积数=输入通道数
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
return out
class MobileNet(nn.Module):
# (128,2) means conv planes=128, conv stride=2, by default conv stride=1
#cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
#cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), [1024,1]]
cfg = [64, (128,2), 128, 256, 256, (512,2), 512, [512,1], [512,1],[512,1], [512,1], [1024,1], [1024,1]]
def __init__(self, num_classes=10):
super(MobileNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32) # build the layer stack automatically
self.linear = nn.Linear(1024, num_classes)
def _make_layers(self, in_planes):
layers = []
for x in self.cfg:
if isinstance(x, int):
out_planes = x
stride = 1
layers.append(Block(in_planes, out_planes, stride))
elif isinstance(x, tuple):
out_planes = x[0]
stride = x[1]
layers.append(Block(in_planes, out_planes, stride))
            # attention (AC) blocks are specified as lists in cfg
elif isinstance(x, list):
out_planes= x[0]
stride = x[1] if len(x)==2 else 1
layers.append(Block_Attention_HALF(in_planes, out_planes, stride))
else:
pass
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# From https://github.com/Z0m6ie/CIFAR-10_PyTorch
#model = mobilenet(num_classes=10, large_img=False)
# From https://github.com/kuangliu/pytorch-cifar
if torch.cuda.is_available():
model=MobileNet(10).cuda()
else:
model=MobileNet(10)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
#scheduler = StepLR(optimizer, step_size=70, gamma=0.1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50,70,75,80], gamma=0.1)
criterion = nn.CrossEntropyLoss()
# Implement validation
def train(epoch):
model.train()
#writer = SummaryWriter()
for batch_idx, (data, target) in enumerate(train_loader):
if use_cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
correct = 0
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).sum()
loss = criterion(output, target)
loss.backward()
accuracy = 100. * (correct.cpu().numpy()/ len(output))
optimizer.step()
if batch_idx % 5*show_step == 0:
# if batch_idx % 2*show_step == 0:
# print(model.layers[1].conv1D.weight.shape)
# print(model.layers[1].conv1D.weight[0:2][0:2])
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accuracy: {:.2f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), accuracy))
# f1=open("Cifar10_INFO.txt","a+")
# f1.write("\n"+'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accuracy: {:.2f}'.format(
# epoch, batch_idx * len(data), len(train_loader.dataset),
# 100. * batch_idx / len(train_loader), loss.item(), accuracy))
# f1.close()
#writer.add_scalar('Loss/Loss', loss.item(), epoch)
#writer.add_scalar('Accuracy/Accuracy', accuracy, epoch)
scheduler.step()
def validate(epoch):
model.eval()
#writer = SummaryWriter()
valid_loss = 0
correct = 0
for data, target in valid_loader:
if use_cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
valid_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).sum()
valid_loss /= len(valid_idx)
accuracy = 100. * correct.cpu().numpy() / len(valid_idx)
print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
valid_loss, correct, len(valid_idx),
100. * correct / len(valid_idx)))
# f1=open("Cifar10_INFO.txt","a+")
# f1.write('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
# valid_loss, correct, len(valid_idx),
# 100. * correct / len(valid_idx)))
# f1.close()
#writer.add_scalar('Loss/Validation_Loss', valid_loss, epoch)
#writer.add_scalar('Accuracy/Validation_Accuracy', accuracy, epoch)
return valid_loss, accuracy
# Fix best model
def test(epoch):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if use_cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct.cpu().numpy() / len(test_loader.dataset)))
# f1=open("Cifar10_INFO.txt","a+")
# f1.write('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
# test_loss, correct, len(test_loader.dataset),
# 100. * correct.cpu().numpy() / len(test_loader.dataset)))
# f1.close()
def save_best(loss, accuracy, best_loss, best_acc):
if best_loss == None:
best_loss = loss
best_acc = accuracy
file = 'saved_models/best_save_model.p'
torch.save(model.state_dict(), file)
elif loss < best_loss and accuracy > best_acc:
best_loss = loss
best_acc = accuracy
file = 'saved_models/best_save_model.p'
torch.save(model.state_dict(), file)
return best_loss, best_acc
# Fantastic logger for tensorboard and pytorch,
# run tensorboard by opening a new terminal and run "tensorboard --logdir runs"
# open tensorboard at http://localhost:6006/
from tensorboardX import SummaryWriter
best_loss = None
best_acc = None
import time
SINCE=time.time()
for epoch in range(max_epoch):
train(epoch)
loss, accuracy = validate(epoch)
best_loss, best_acc = save_best(loss, accuracy, best_loss, best_acc)
NOW=time.time()
DURINGS=NOW-SINCE
SINCE=NOW
print("the time of this epoch:[{} s]".format(DURINGS))
if epoch>=10 and (epoch-10)%2==0:
test(epoch)
# writer = SummaryWriter()
# writer.export_scalars_to_json("./all_scalars.json")
# writer.close()
#---------------------------- Test ------------------------------
test(epoch)
###Output
Train Epoch: 0 [0/50000 (0%)] Loss: 2.327121, Accuracy: 8.20
Train Epoch: 0 [1280/50000 (3%)] Loss: 2.278716, Accuracy: 13.67
Train Epoch: 0 [2560/50000 (6%)] Loss: 2.095520, Accuracy: 23.83
Train Epoch: 0 [3840/50000 (9%)] Loss: 2.080643, Accuracy: 25.78
Train Epoch: 0 [5120/50000 (11%)] Loss: 2.028720, Accuracy: 28.12
Train Epoch: 0 [6400/50000 (14%)] Loss: 1.943042, Accuracy: 27.34
Train Epoch: 0 [7680/50000 (17%)] Loss: 1.917816, Accuracy: 25.00
Train Epoch: 0 [8960/50000 (20%)] Loss: 1.854106, Accuracy: 32.81
Train Epoch: 0 [10240/50000 (23%)] Loss: 1.816418, Accuracy: 28.91
Train Epoch: 0 [11520/50000 (26%)] Loss: 1.893929, Accuracy: 26.95
Train Epoch: 0 [12800/50000 (28%)] Loss: 1.755368, Accuracy: 36.72
Train Epoch: 0 [14080/50000 (31%)] Loss: 1.649112, Accuracy: 37.89
Train Epoch: 0 [15360/50000 (34%)] Loss: 1.643022, Accuracy: 39.84
Train Epoch: 0 [16640/50000 (37%)] Loss: 1.724194, Accuracy: 35.55
Train Epoch: 0 [17920/50000 (40%)] Loss: 1.572142, Accuracy: 44.92
Train Epoch: 0 [19200/50000 (43%)] Loss: 1.647967, Accuracy: 35.94
Train Epoch: 0 [20480/50000 (45%)] Loss: 1.824593, Accuracy: 37.89
Train Epoch: 0 [21760/50000 (48%)] Loss: 1.624053, Accuracy: 39.45
Train Epoch: 0 [23040/50000 (51%)] Loss: 1.584269, Accuracy: 42.19
Train Epoch: 0 [24320/50000 (54%)] Loss: 1.654619, Accuracy: 35.55
Train Epoch: 0 [25600/50000 (57%)] Loss: 1.640578, Accuracy: 36.72
Train Epoch: 0 [26880/50000 (60%)] Loss: 1.598266, Accuracy: 35.55
Train Epoch: 0 [28160/50000 (62%)] Loss: 1.618258, Accuracy: 33.98
Train Epoch: 0 [29440/50000 (65%)] Loss: 1.465599, Accuracy: 48.05
Train Epoch: 0 [30720/50000 (68%)] Loss: 1.573250, Accuracy: 42.97
Train Epoch: 0 [32000/50000 (71%)] Loss: 1.416531, Accuracy: 46.88
Train Epoch: 0 [33280/50000 (74%)] Loss: 1.717389, Accuracy: 36.33
Train Epoch: 0 [34560/50000 (77%)] Loss: 1.460610, Accuracy: 46.09
Train Epoch: 0 [35840/50000 (80%)] Loss: 1.488409, Accuracy: 46.09
Train Epoch: 0 [37120/50000 (82%)] Loss: 1.434407, Accuracy: 47.66
Train Epoch: 0 [38400/50000 (85%)] Loss: 1.499902, Accuracy: 48.44
Train Epoch: 0 [39680/50000 (88%)] Loss: 1.404114, Accuracy: 47.66
Train Epoch: 0 [40960/50000 (91%)] Loss: 1.440864, Accuracy: 48.44
Train Epoch: 0 [42240/50000 (94%)] Loss: 1.312991, Accuracy: 50.39
Train Epoch: 0 [43520/50000 (97%)] Loss: 1.498859, Accuracy: 47.66
Train Epoch: 0 [35000/50000 (99%)] Loss: 1.404046, Accuracy: 48.00
Validation set: Average loss: 1.8436, Accuracy: 2044/5000 (40.00%)
the time of this epoch:[54.28705048561096 s]
Train Epoch: 1 [0/50000 (0%)] Loss: 1.267633, Accuracy: 54.30
Train Epoch: 1 [1280/50000 (3%)] Loss: 1.216760, Accuracy: 58.98
Train Epoch: 1 [2560/50000 (6%)] Loss: 1.304639, Accuracy: 48.44
Train Epoch: 1 [3840/50000 (9%)] Loss: 1.378345, Accuracy: 49.61
Train Epoch: 1 [5120/50000 (11%)] Loss: 1.334210, Accuracy: 52.73
Train Epoch: 1 [6400/50000 (14%)] Loss: 1.174827, Accuracy: 56.64
Train Epoch: 1 [7680/50000 (17%)] Loss: 1.195123, Accuracy: 58.20
Train Epoch: 1 [8960/50000 (20%)] Loss: 1.235761, Accuracy: 53.12
Train Epoch: 1 [10240/50000 (23%)] Loss: 1.214649, Accuracy: 53.52
Train Epoch: 1 [11520/50000 (26%)] Loss: 1.275602, Accuracy: 54.30
Train Epoch: 1 [12800/50000 (28%)] Loss: 1.289325, Accuracy: 53.52
Train Epoch: 1 [14080/50000 (31%)] Loss: 1.328577, Accuracy: 51.95
Train Epoch: 1 [15360/50000 (34%)] Loss: 1.182112, Accuracy: 57.03
Train Epoch: 1 [16640/50000 (37%)] Loss: 1.237077, Accuracy: 54.30
Train Epoch: 1 [17920/50000 (40%)] Loss: 1.229070, Accuracy: 58.98
Train Epoch: 1 [19200/50000 (43%)] Loss: 1.311842, Accuracy: 59.77
Train Epoch: 1 [20480/50000 (45%)] Loss: 1.229453, Accuracy: 48.83
Train Epoch: 1 [21760/50000 (48%)] Loss: 1.152753, Accuracy: 57.03
Train Epoch: 1 [23040/50000 (51%)] Loss: 1.128782, Accuracy: 57.42
Train Epoch: 1 [24320/50000 (54%)] Loss: 1.215612, Accuracy: 55.86
Train Epoch: 1 [25600/50000 (57%)] Loss: 1.287735, Accuracy: 56.25
Train Epoch: 1 [26880/50000 (60%)] Loss: 1.137285, Accuracy: 59.38
Train Epoch: 1 [28160/50000 (62%)] Loss: 1.145961, Accuracy: 55.47
Train Epoch: 1 [29440/50000 (65%)] Loss: 1.056359, Accuracy: 62.89
Train Epoch: 1 [30720/50000 (68%)] Loss: 1.133746, Accuracy: 60.55
Train Epoch: 1 [32000/50000 (71%)] Loss: 1.001434, Accuracy: 63.67
Train Epoch: 1 [33280/50000 (74%)] Loss: 1.150771, Accuracy: 60.55
Train Epoch: 1 [34560/50000 (77%)] Loss: 1.254082, Accuracy: 55.47
Train Epoch: 1 [35840/50000 (80%)] Loss: 1.113859, Accuracy: 58.59
Train Epoch: 1 [37120/50000 (82%)] Loss: 1.236987, Accuracy: 58.20
Train Epoch: 1 [38400/50000 (85%)] Loss: 1.168612, Accuracy: 54.69
Train Epoch: 1 [39680/50000 (88%)] Loss: 1.188283, Accuracy: 58.20
Train Epoch: 1 [40960/50000 (91%)] Loss: 1.160031, Accuracy: 56.64
Train Epoch: 1 [42240/50000 (94%)] Loss: 1.019460, Accuracy: 62.11
Train Epoch: 1 [43520/50000 (97%)] Loss: 1.012998, Accuracy: 67.19
Train Epoch: 1 [35000/50000 (99%)] Loss: 1.057628, Accuracy: 62.50
Validation set: Average loss: 1.3207, Accuracy: 2727/5000 (54.00%)
the time of this epoch:[33.91703414916992 s]
Train Epoch: 2 [0/50000 (0%)] Loss: 0.968982, Accuracy: 67.19
torch.Size([512, 1, 1])
tensor([[[ 0.2174]],
[[ 0.1541]],
[[ 0.4959]],
[[ 0.0565]],
[[ 0.1220]],
[[ 0.3681]],
[[ 0.0000]],
[[ 0.2147]],
[[ 0.0231]],
[[ 0.1259]],
[[ 0.2669]],
[[ 0.0610]],
[[ 0.0372]],
[[ 0.1093]],
[[ 0.1934]],
[[ 0.3064]],
[[ 0.0158]],
[[ 0.4384]],
[[ 0.1512]],
[[ 0.1546]],
[[ 0.2365]],
[[ 0.1587]],
[[ 0.2405]],
[[ 0.2047]],
[[ 0.2710]],
[[ 0.2804]],
[[ 0.0842]],
[[ 0.0858]],
[[ 0.4488]],
[[ 0.0755]],
[[ 0.0814]],
[[ 0.0673]],
[[ 0.1707]],
[[ 0.3630]],
[[ 0.3434]],
[[ 0.5405]],
[[ 0.1279]],
[[ 0.3384]],
[[ 0.1148]],
[[ 0.1510]],
[[ 0.0552]],
[[ 0.1944]],
[[ 0.0857]],
[[ 0.1905]],
[[ 0.1459]],
[[ 0.0941]],
[[ 0.1304]],
[[ 0.1328]],
[[ 0.0728]],
[[ 0.4037]],
[[ 0.3247]],
[[ 0.2751]],
[[ 0.1383]],
[[ 0.2457]],
[[ 0.4788]],
[[ 0.1541]],
[[ 0.1419]],
[[ 0.2528]],
[[ 0.0284]],
[[ 0.0632]],
[[ 0.2566]],
[[ 0.0626]],
[[ 0.0960]],
[[ 0.3188]],
[[ 0.1912]],
[[ 0.1765]],
[[ 0.3226]],
[[ 0.1332]],
[[ 0.3319]],
[[ 0.1522]],
[[ 0.2265]],
[[ 0.3387]],
[[ 0.2954]],
[[ 0.0030]],
[[ 0.0918]],
[[ 0.2360]],
[[ 0.0753]],
[[ 0.1641]],
[[ 0.1222]],
[[ 0.3818]],
[[ 0.2185]],
[[ 0.0515]],
[[ 0.3038]],
[[ 0.0729]],
[[ 0.1216]],
[[ 0.1995]],
[[ 0.6078]],
[[ 0.0300]],
[[ 0.1174]],
[[ 0.0605]],
[[ 0.3508]],
[[ 0.0986]],
[[ 0.4355]],
[[ 0.0495]],
[[ 0.2055]],
[[ 0.2676]],
[[ 0.2805]],
[[ 0.3344]],
[[ 0.0575]],
[[ 0.3172]],
[[ 0.0002]],
[[ 0.1524]],
[[ 0.0000]],
[[ 0.1894]],
[[ 0.0092]],
[[ 0.1733]],
[[ 0.2533]],
[[ 0.2213]],
[[ 0.1664]],
[[ 0.1319]],
[[ 0.1942]],
[[ 0.0160]],
[[ 0.2838]],
[[ 0.3026]],
[[ 0.1324]],
[[ 0.0180]],
[[ 0.0962]],
[[ 0.0881]],
[[ 0.1480]],
[[ 0.0383]],
[[ 0.1298]],
[[ 0.3726]],
[[ 0.0800]],
[[ 0.0360]],
[[ 0.0884]],
[[ 0.1566]],
[[ 0.0322]],
[[ 0.1002]],
[[ 0.2686]],
[[ 0.0774]],
[[ 0.3240]],
[[ 0.2502]],
[[ 0.0133]],
[[ 0.1065]],
[[ 0.3009]],
[[ 0.5554]],
[[ 0.1408]],
[[ 0.0858]],
[[ 0.0815]],
[[ 0.3134]],
[[ 0.1351]],
[[ 0.0830]],
[[ 0.4286]],
[[ 0.2431]],
[[ 0.2765]],
[[ 0.1137]],
[[ 0.2030]],
[[ 0.1605]],
[[ 0.1376]],
[[ 0.2980]],
[[ 0.2419]],
[[ 0.2873]],
[[ 0.0260]],
[[ 0.0597]],
[[ 0.1975]],
[[ 0.1145]],
[[ 0.0675]],
[[ 0.0000]],
[[ 0.1615]],
[[ 0.2269]],
[[ 0.1704]],
[[ 0.2826]],
[[ 0.3217]],
[[ 0.1610]],
[[ 0.2386]],
[[ 0.1047]],
[[ 0.1907]],
[[ 0.1330]],
[[ 0.0483]],
[[ 0.0512]],
[[ 0.0977]],
[[ 0.2031]],
[[ 0.1738]],
[[ 0.2557]],
[[ 0.1391]],
[[ 0.0000]],
[[ 0.0573]],
[[ 0.0270]],
[[ 0.4593]],
[[ 0.2537]],
[[ 0.2137]],
[[ 0.1883]],
[[ 0.1206]],
[[ 0.1067]],
[[ 0.5257]],
[[ 0.1995]],
[[ 0.4204]],
[[ 0.0258]],
[[ 0.1747]],
[[ 0.4512]],
[[ 0.1633]],
[[ 0.3409]],
[[ 0.0352]],
[[ 0.2410]],
[[ 0.0962]],
[[ 0.3042]],
[[ 0.1322]],
[[ 0.1478]],
[[ 0.3227]],
[[ 0.1264]],
[[ 0.0611]],
[[ 0.2094]],
[[ 0.2577]],
[[ 0.0732]],
[[ 0.2121]],
[[ 0.1629]],
[[ 0.2818]],
[[ 0.0980]],
[[ 0.2168]],
[[ 0.0378]],
[[ 0.4105]],
[[ 0.2743]],
[[ 0.3069]],
[[ 0.0957]],
[[ 0.1690]],
[[ 0.1454]],
[[ 0.0622]],
[[ 0.0975]],
[[ 0.0050]],
[[ 0.2064]],
[[ 0.0847]],
[[ 0.1084]],
[[ 0.1157]],
[[ 0.4936]],
[[ 0.5444]],
[[ 0.1832]],
[[ 0.0342]],
[[ 0.0575]],
[[ 0.2727]],
[[ 0.1891]],
[[ 0.3053]],
[[ 0.0180]],
[[ 0.0266]],
[[ 0.1030]],
[[ 0.1496]],
[[ 0.1242]],
[[ 0.0840]],
[[ 0.2062]],
[[ 0.1069]],
[[ 0.3194]],
[[ 0.1648]],
[[ 0.1011]],
[[ 0.1927]],
[[ 0.2041]],
[[ 0.0679]],
[[ 0.4755]],
[[ 0.2818]],
[[ 0.0835]],
[[ 0.1725]],
[[ 0.2029]],
[[ 0.2559]],
[[ 0.1240]],
[[ 0.0279]],
[[ 0.1571]],
[[ 0.1176]],
[[ 0.0365]],
[[ 0.3586]],
[[ 0.0105]],
[[ 0.1912]],
[[ 0.2890]],
[[ 0.0523]],
[[ 0.0442]],
[[ 0.0374]],
[[ 0.5378]],
[[ 0.2771]],
[[ 0.0320]],
[[ 0.0803]],
[[ 0.0635]],
[[ 0.2179]],
[[ 0.0829]],
[[ 0.3014]],
[[ 0.0137]],
[[ 0.2258]],
[[ 0.5300]],
[[ 0.3379]],
[[ 0.1979]],
[[ 0.4659]],
[[ 0.0584]],
[[ 0.1584]],
[[ 0.2106]],
[[ 0.2430]],
[[ 0.1161]],
[[ 0.0523]],
[[ 0.0117]],
[[ 0.2503]],
[[ 0.4393]],
[[ 0.4354]],
[[ 0.1498]],
[[ 0.3104]],
[[ 0.0439]],
[[ 0.1367]],
[[ 0.1319]],
[[ 0.1132]],
[[ 0.0716]],
[[ 0.6277]],
[[ 0.4754]],
[[ 0.1055]],
[[ 0.1286]],
[[ 0.1103]],
[[ 0.0187]],
[[ 0.3647]],
[[ 0.0390]],
[[ 0.1691]],
[[ 0.0021]],
[[ 0.0414]],
[[ 0.1217]],
[[ 0.1894]],
[[ 0.1363]],
[[ 0.2356]],
[[ 0.4957]],
[[ 0.3964]],
[[ 0.2413]],
[[ 0.2454]],
[[ 0.1083]],
[[ 0.1041]],
[[ 0.4132]],
[[ 0.2111]],
[[ 0.0551]],
[[ 0.3497]],
[[ 0.1396]],
[[ 0.0691]],
[[ 0.0001]],
[[ 0.3888]],
[[ 0.0779]],
[[ 0.0630]],
[[ 0.1742]],
[[ 0.5267]],
[[ 0.4330]],
[[ 0.0923]],
[[ 0.0981]],
[[ 0.4088]],
[[ 0.0248]],
[[ 0.0413]],
[[ 0.0334]],
[[ 0.0547]],
[[ 0.4676]],
[[ 0.0241]],
[[ 0.1699]],
[[ 0.0032]],
[[ 0.1315]],
[[ 0.4180]],
[[ 0.5453]],
[[ 0.2254]],
[[ 0.1046]],
[[ 0.2895]],
[[ 0.6735]],
[[ 0.3004]],
[[ 0.3671]],
[[ 0.2527]],
[[ 0.0533]],
[[ 0.2425]],
[[ 0.0435]],
[[ 0.0591]],
[[ 0.0685]],
[[ 0.2730]],
[[ 0.3808]],
[[ 0.0953]],
[[ 0.3604]],
[[ 0.1075]],
[[ 0.1030]],
[[ 0.1514]],
[[ 0.0178]],
[[ 0.2462]],
[[ 0.3159]],
[[ 0.0755]],
[[ 0.0736]],
[[ 0.0441]],
[[ 0.0610]],
[[ 0.0593]],
[[ 0.1053]],
[[ 0.0170]],
[[ 0.3954]],
[[ 0.1552]],
[[ 0.2231]],
[[ 0.0989]],
[[ 0.0829]],
[[ 0.2211]],
[[ 0.3112]],
[[ 0.0245]],
[[ 0.2048]],
[[ 0.5629]],
[[ 0.1337]],
[[ 0.1132]],
[[ 0.2910]],
[[ 0.0139]],
[[ 0.2465]],
[[ 0.0503]],
[[ 0.3524]],
[[ 0.0709]],
[[ 0.3170]],
[[ 0.1131]],
[[ 0.1253]],
[[ 0.4896]],
[[ 0.0896]],
[[ 0.0682]],
[[ 0.0838]],
[[ 0.1490]],
[[ 0.0618]],
[[ 0.0512]],
[[ 0.1471]],
[[ 0.2254]],
[[ 0.1592]],
[[ 0.0000]],
[[ 0.3969]],
[[ 0.0000]],
[[ 0.0251]],
[[ 0.0991]],
[[ 0.1762]],
[[ 0.0275]],
[[ 0.0865]],
[[ 0.1001]],
[[ 0.0592]],
[[ 0.5401]],
[[ 0.0392]],
[[ 0.2023]],
[[ 0.2308]],
[[ 0.2612]],
[[ 0.1093]],
[[ 0.2091]],
[[ 0.0698]],
[[ 0.1769]],
[[ 0.1024]],
[[ 0.1579]],
[[ 0.0965]],
[[ 0.1437]],
[[ 0.0061]],
[[ 0.0988]],
[[ 0.7795]],
[[ 0.0453]],
[[ 0.3061]],
[[ 0.1007]],
[[ 0.1952]],
[[ 0.2788]],
[[ 0.1240]],
[[ 0.3064]],
[[ 0.0506]],
[[ 0.0792]],
[[ 0.2305]],
[[ 0.2671]],
[[ 0.1010]],
[[ 0.2909]],
[[ 0.1807]],
[[ 0.0000]],
[[ 0.1316]],
[[ 0.1832]],
[[ 0.2906]],
[[ 0.0447]],
[[ 0.0338]],
[[ 0.1614]],
[[ 0.3076]],
[[ 0.4740]],
[[ 0.1396]],
[[ 0.3764]],
[[ 0.1293]],
[[ 0.2245]],
[[ 0.0555]],
[[ 0.3014]],
[[ 0.2026]],
[[ 0.3747]],
[[ 0.0423]],
[[ 0.3383]],
[[ 0.0708]],
[[ 0.3163]],
[[ 0.0000]],
[[ 0.0000]],
[[ 0.1990]],
[[ 0.4578]],
[[ 0.3804]],
[[ 0.0291]],
[[ 0.0797]],
[[ 0.2601]],
[[ 0.1640]],
[[ 0.1594]],
[[ 0.1703]],
[[ 0.2009]],
[[ 0.1307]],
[[ 0.0000]],
[[ 0.2022]],
[[ 0.1541]],
[[ 0.2416]],
[[ 0.1625]],
[[ 0.1717]],
[[ 0.0839]],
[[ 0.1004]],
[[ 0.0480]],
[[ 0.0728]],
[[ 0.3729]],
[[ 0.1518]],
[[ 0.1647]],
[[ 0.0635]],
[[ 0.1071]],
[[ 0.1565]],
[[ 0.3235]],
[[ 0.0570]],
[[ 0.2246]],
[[ 0.2130]],
[[ 0.6951]],
[[ 0.3254]],
[[ 0.1814]],
[[ 0.0000]],
[[ 0.3961]],
[[ 0.3082]],
[[ 0.3926]],
[[ 0.2177]],
[[ 0.1495]],
[[ 0.0932]],
[[ 0.2504]],
[[ 0.1567]],
[[ 0.2674]],
[[ 0.0448]],
[[ 0.3087]],
[[ 0.2093]]], device='cuda:0')
Parameter containing:
tensor([[[[ 1.4749e-03]],
[[-4.3017e-03]],
[[ 2.7908e-02]],
...,
[[-2.5615e-02]],
[[-1.3616e-03]],
[[-2.6031e-02]]],
[[[-3.5058e-02]],
[[-7.7533e-03]],
[[-2.3032e-02]],
...,
[[ 1.4386e-02]],
[[ 1.3894e-02]],
[[-2.7411e-02]]],
[[[ 1.5047e-02]],
[[ 1.0508e-01]],
[[ 1.8795e-02]],
...,
[[ 5.7905e-02]],
[[ 3.3314e-02]],
[[ 3.0692e-02]]],
...,
[[[-4.6669e-03]],
[[-1.3845e-02]],
[[ 3.4161e-04]],
...,
[[-1.1091e-02]],
[[-2.4607e-02]],
[[ 1.6799e-02]]],
[[[-3.8191e-02]],
[[-3.7605e-03]],
[[ 9.6972e-04]],
...,
[[ 2.8124e-02]],
[[-5.2659e-03]],
[[-3.6028e-02]]],
[[[-2.6112e-02]],
[[-3.1771e-02]],
[[ 3.2078e-02]],
...,
[[ 1.7915e-02]],
[[-2.0139e-02]],
[[-2.2855e-03]]]], device='cuda:0')
###Markdown
Step 3: Test
###Code
test(epoch)
###Output
Test set: Average loss: 0.6902, Accuracy: 8877/10000 (88.77%)
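###Markdown
A rough size check of the trained network; a minimal sketch using standard PyTorch parameter iteration:
###Code
# Total number of trainable parameters in the model
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('trainable parameters:', n_params)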
###Markdown
First run: the scale lies in [0,1]. (figure omitted)
###Code
# inspect the logged training information
import matplotlib.pyplot as plt
def parse(in_file,flag):
num=-1
ys=list()
xs=list()
losses=list()
with open(in_file,"r") as reader:
for aLine in reader:
#print(aLine)
res=[e for e in aLine.strip('\n').split(" ")]
if res[0]=="Train" and flag=="Train":
num=num+1
ys.append(float(res[-1]))
xs.append(int(num))
losses.append(float(res[-3].split(',')[0]))
if res[0]=="Validation" and flag=="Validation":
num=num+1
xs.append(int(num))
tmp=[float(e) for e in res[-2].split('/')]
ys.append(100*float(tmp[0]/tmp[1]))
losses.append(float(res[-4].split(',')[0]))
plt.figure(1)
plt.plot(xs,ys,'ro')
plt.figure(2)
plt.plot(xs, losses, 'ro')
plt.show()
def main():
in_file="D://INFO.txt"
    # show accuracy and loss for the training phase
parse(in_file,"Train") # "Validation"
    # show accuracy and loss for the validation phase
#parse(in_file,"Validation") # "Validation"
if __name__=="__main__":
main()
# inspect the logged training information
import matplotlib.pyplot as plt
def parse(in_file,flag):
num=-1
ys=list()
xs=list()
losses=list()
with open(in_file,"r") as reader:
for aLine in reader:
#print(aLine)
res=[e for e in aLine.strip('\n').split(" ")]
if res[0]=="Train" and flag=="Train":
num=num+1
ys.append(float(res[-1]))
xs.append(int(num))
losses.append(float(res[-3].split(',')[0]))
if res[0]=="Validation" and flag=="Validation":
num=num+1
xs.append(int(num))
tmp=[float(e) for e in res[-2].split('/')]
ys.append(100*float(tmp[0]/tmp[1]))
losses.append(float(res[-4].split(',')[0]))
plt.figure(1)
plt.plot(xs,ys,'r-')
plt.figure(2)
plt.plot(xs, losses, 'r-')
plt.show()
def main():
in_file="D://INFO.txt"
    # show accuracy and loss for the training phase
parse(in_file,"Train") # "Validation"
    # show accuracy and loss for the validation phase
parse(in_file,"Validation") # "Validation"
if __name__=="__main__":
main()
###Output
_____no_output_____ |
AI/County Wise Confirmed COVID-19 Cases.ipynb | ###Markdown
Importing Libraries
###Code
import pandas as pd
import numpy as np
import tensorflow as tf
keras = tf.keras
from sklearn.preprocessing import RobustScaler
import plotly.graph_objects as go
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
###Output
_____no_output_____
###Markdown
Data Pre-Processing
###Code
confirmed_df = pd.read_csv('dataset_final.csv')
confirmed_df.head()
confirmed_df = confirmed_df.groupby("Province/State").sum().reset_index()
print("DONE 1")
County_confirmed = confirmed_df["Province/State"].values
print("DONE 2")
confirmed_df = confirmed_df.drop(['Province/State'], axis=1)
print("DONE 3")
confirmed_df = confirmed_df[confirmed_df.columns].T
print("DONE 4")
confirmed_df.columns = County_confirmed
print("DONE 5")
Date_index = confirmed_df.index
print("DONE 6")
confirmed_df = confirmed_df.diff().fillna(0)
print("DONE 7")
confirmed_df = confirmed_df.abs()
print("DONE 8")
confirmed_df.head()
###Output
DONE 1
DONE 2
DONE 3
DONE 4
DONE 5
DONE 6
DONE 7
DONE 8
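###Markdown
The diff().fillna(0) step above converts cumulative totals into day-over-day changes, and abs() forces them non-negative. A toy sketch of the idea on a made-up cumulative series:
###Code
# Cumulative counts -> daily new cases: [1, 3, 6, 6, 10] becomes [0, 2, 3, 0, 4]
toy_cum = pd.Series([1, 3, 6, 6, 10])
print(toy_cum.diff().fillna(0).abs().tolist())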
###Markdown
Scaling the data
###Code
sc = RobustScaler()
confirmed_df_scaled = sc.fit_transform(confirmed_df)
confirmed_df_scaled = pd.DataFrame(confirmed_df_scaled)
###Output
_____no_output_____
###Markdown
Enter the number of days you want to predict
###Code
DAYS_TO_PREDICT = int(input('Enter Number Of Days You Want to Predict : '))
predict_confirm_index = pd.date_range(start=Date_index[-1],periods=DAYS_TO_PREDICT + 1,closed='right')
predict_confirm_index = predict_confirm_index.strftime('%Y/%m/%d')
###Output
Enter Number Of Days You Want to Predict : 7
###Markdown
Creating a class for building the model
###Code
class Corona_Confirmed_model():
    def create_dataset(self, dataset, time_steps=1, column = 0):
        # Build sliding windows over one column: each X row is a window of
        # length time_steps, and each y is the value immediately after it.
        dataX, dataY = [], []
        for i in range(len(dataset)-time_steps-1):
            a = dataset[i:(i+time_steps), column]
            dataX.append(a)
            dataY.append(dataset[i + time_steps, column])
        return np.array(dataX), np.array(dataY)
def Model(self,dframe,column):
df = dframe.values
df = df.astype('float32')
time_steps = 1
trainX, trainY = self.create_dataset(df, time_steps, column)
trainX = np.reshape(trainX, (trainX.shape[0],1,trainX.shape[1]))
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
confirm_model = keras.models.Sequential([
keras.layers.InputLayer(input_shape=[None, 1]),
keras.layers.Conv1D(filters=16, kernel_size=2,
strides=1, padding="causal",
activation="relu",
),
keras.layers.LSTM(30,kernel_initializer = 'he_normal',
return_sequences = True),
keras.layers.LSTM(30,kernel_initializer = 'he_normal',
return_sequences = True),
keras.layers.Dense(15,kernel_initializer = 'he_normal',
activation = 'relu'),
keras.layers.Dense(15,kernel_initializer = 'he_normal',
activation = 'relu'),
keras.layers.Dense(1,activation = 'linear',kernel_initializer = 'he_normal'),
keras.layers.Lambda(lambda x: x * 200)
])
optimizer = keras.optimizers.Adadelta(lr=0.001)
confirm_model.compile(loss=['mae'],optimizer=optimizer)
confirm_model.fit(trainX, trainY, epochs=100,
batch_size = 10,verbose = 0)
confirm_test_seq = trainX[-1:]
confirm_test_seq = confirm_test_seq
confirm_test_seq = confirm_test_seq.reshape(len(confirm_test_seq), time_steps, 1)
confirm_preds = []
for _ in range (DAYS_TO_PREDICT):
confirm_pred = confirm_model.predict(confirm_test_seq)
confirm_preds.append(confirm_pred)
confirm_new_seq = confirm_pred
confirm_test_seq = confirm_new_seq
confirm_test_seq = confirm_test_seq.reshape(len(confirm_test_seq ), time_steps, 1)
return confirm_preds
###Output
_____no_output_____
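###Markdown
To see what create_dataset produces, a toy run may help; a minimal sketch with a hypothetical 4-row, 1-column input and time_steps=1:
###Code
# Each X row is a 1-step window; each y is the value that follows it.
toy = np.array([[1.0], [2.0], [3.0], [4.0]])
X_demo, y_demo = Corona_Confirmed_model().create_dataset(toy, time_steps=1, column=0)
print(X_demo)  # [[1.] [2.]]
print(y_demo)  # [2. 3.]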
###Markdown
Calling the class and predicting the cases for every individual county
###Code
NN = Corona_Confirmed_model()
confirm_predictions = []
for i in range(len(County_confirmed)):
result = NN.Model(confirmed_df_scaled,i)
confirm_predictions.append(result)
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
###Markdown
Processing the predicted data
###Code
confirm_predictions = (np.array(confirm_predictions).transpose())
confirm_predictions = confirm_predictions.reshape(((DAYS_TO_PREDICT),len(County_confirmed)))
confirm_predictions = sc.inverse_transform(confirm_predictions)
confirm_predictions = pd.DataFrame(confirm_predictions)
confirm_predictions = confirm_predictions.abs()
confirm_predictions['Dates'] = predict_confirm_index
confirm_predictions.set_index('Dates', inplace = True)
confirm_predictions.columns = County_confirmed
confirm_predictions.head()
###Output
_____no_output_____
###Markdown
Visualizing The Confirmed Cases For A County As Per The User's Choice List of Infected Counties
###Code
Infected_Counties = pd.DataFrame(County_confirmed)
Infected_Counties.columns = ['Counties']
pd.set_option("max_rows", None)
Infected_Counties
###Output
_____no_output_____
###Markdown
Select The County
###Code
County = input('Enter the name of the county from the above list you would like to see the daily cases for: ')
###Output
_____no_output_____
###Markdown
Confirmed coronavirus cases to date, on a daily basis
###Code
fig = go.Figure()
fig.update_layout(template='plotly_dark',title={'text': 'Confirmed Daily Cases','y':0.9,'x':0.5,'xanchor': 'center',
'yanchor': 'top'},
xaxis_title = "Dates",
yaxis_title="Number Of Cases",
)
fig.add_trace(go.Scatter(x=confirmed_df.index,
y=confirmed_df[County],
mode='lines+markers',
line=dict(color='blue', width=2)))
###Output
_____no_output_____
###Markdown
Predictions of confirmed cases of coronavirus
###Code
fig = go.Figure()
fig.update_layout(template='plotly_dark',title={'text': 'Confirmed Cases Predictions','y':0.9,'x':0.5,'xanchor': 'center',
'yanchor': 'top'},
xaxis_title = "Dates",
yaxis_title="Number Of Cases")
fig.update_xaxes(tickangle=90, showticklabels = True, type = 'category')
fig.add_trace(go.Scatter(x=confirm_predictions.index,
y=confirm_predictions[County],
mode='lines+markers',
line=dict(color='yellow', width=2)))
county_list = Infected_Counties.values.tolist()
print(county_list)
f = open("predictions.txt", "a+")
for county in county_list:
f.write(str(confirm_predictions[county]))
#print(str(confirm_predictions[county]))
f.close()
###Output
[['AB'], ['AG'], ['AR'], ['B'], ['BC'], ['BH'], ['BN'], ['BR'], ['BT'], ['BV'], ['BZ'], ['CJ'], ['CL'], ['CS'], ['CT'], ['CV'], ['DB'], ['DJ'], ['GJ'], ['GL'], ['GR'], ['HD'], ['HR'], ['IF'], ['IL'], ['IS'], ['MH'], ['MM'], ['MS'], ['NT'], ['OT'], ['PH'], ['SB'], ['SJ'], ['SM'], ['SV'], ['TL'], ['TM'], ['TR'], ['VL'], ['VN'], ['VS']]
|
week_09.ipynb | ###Markdown
Object & Class- An object? A tangible thing or a concept- A concept that has attributes and behaviors- Car - attributes: engine, brand, price, color, name, ... => variables - behaviors: drive off, stop, turn left, turn right, ... => methods (functions)- Cat - attributes: breed, name, color, weight, sex, ... - behaviors: meow, run, walk, glare, ...- Dictionary - attributes: letter count, first letter, frequency of occurrence, ... - behaviors: add, remove, ... - How to relate them - person / store / goods - name, money, stock, profit, customer list, brand, name, price - buy / register as a member, sell, compute profit - Why use objects: easy code reuse Class- Code that holds the basic information an object should have- A blueprint, a mold, a framework- the mold for bungeoppang (fish-shaped bread)- a bungeoppang == an instance- 'class Car:' - class names use CamelCase: class NiceCar getter, setter- Encapsulation, information hiding- Making variables hard to access from outside the class- public -> private
###Code
class Student:
def __init__(self, input_id, input_name, input_major):
        self.__student_id = input_id # the most Pythonic approach: a name-mangled private attribute
self.student_name = input_name
        self.hidden_major = input_major # somewhat more Pythonic than exposing the name directly
@property
def student_id(self):
return self.__student_id
@student_id.setter
def student_id(self, input_id):
self.__student_id = input_id
def get_name(self):
print('from the getter')
        return self.student_name  # fixed: the attribute is student_name, not name
def set_name(self, input_name):
print('from the setter')
self.student_name = input_name
def get_major(self):
return self.hidden_major
def set_major(self, input_major):
self.hidden_major = input_major
student_major = property(get_major, set_major)
watson = Student(20201, 'watson', 'cs')
watson.student_name
watson.student_major
watson.hidden_major
watson.student_id
watson.__student_id
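# Note: the line above raises AttributeError -- Python mangles a leading
# double underscore, storing the attribute as _Student__student_id instead.
# (sketch) the mangled name is still reachable, though using it defeats the point:
watson._Student__student_id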
import copy
# class exercise
class Soldier:
def __init__(self, input_name):
self.name = input_name
self.location = [0, 0] # x axis, y axis
def walk_forward(self, distance=1):
self.location[1] += distance
def walk_backward(self, distance=1):
self.location[1] -= distance
def walk_left(self, distance=1):
self.location[0] -= distance
def walk_right(self, distance=1):
self.location[0] += distance
def shoot(self):
shoot_location = copy.deepcopy(self.location)
shoot_location[1] += 3
print(f'{self.name} has shot enemies from {self.location} to {shoot_location}')
kim = Soldier('kim')
kim.walk_forward()
kim.walk_left(3)
kim.shoot()
###Output
kim has shot enemies from [-3, 1] to [-3, 4]
|
Clustering/B_KNN.ipynb | ###Markdown
**KNN** **Implementation**
###Code
import numpy as np
import matplotlib.pyplot as plt
import csv
from math import sqrt, inf
from random import randint
class Node:
    def __init__(self, parent, x, area):
        self.parent = parent
        self.x = x
        self.childs = [None, None] # [left_child, right_child]
        # The area is a 2*len(x)-dimensional vector representing a hypercube, where each
        # pair of elements holds the minimum and maximum values of a given coordinate.
        # For example, if len(x) == 2, then area = [a, b, c, d] represents the square:
        # a <= x[0] <= b; c <= x[1] <= d
        self.area = area
class KNN:
    def __init__(self, X):
        self.X = X

    def d(self, x, y):
        """ Euclidean distance between two vectors. """
        return np.linalg.norm(x-y)

    def build_kd_tree(self, X=None, parent=None, right=True, d=0, root=True, area=None):
        """ Build a KD-Tree.
        INPUT:
            X: Data set of the current node.
            parent: Parent node of the current node.
            right: Indicates whether the current node is the right child.
            d: Attribute used to perform the binary split of the data.
            root: Indicates whether the current node is the root of the whole tree.
            area: Area represented by the current node.
        """
        # If the node is the root, take all the data; the area is the whole space.
        if root:
            X = self.X
            area = [-inf,inf]*len(X[0])
        # If there are no elements, no node is created.
        if len(X) == 0: return
        # If there is exactly one element, create a node with that single element.
        elif len(X) == 1:
            node = Node(parent, X[0], area)
            # Check that the node is not the root, which would mean there is only one data point.
            if not root: parent.childs[int(right)] = node
        # If there is more than one data point.
        else:
            # Sort the elements by their d-th attribute.
            X_c = X.copy()
            X_c.sort(key = lambda x: x[d])
            # Get the median.
            m = int(len(X_c)/2)
            x_m = X_c[m]
            # Create a new node that stores the median.
            node = Node(parent, x_m, area)
            if not root: parent.childs[int(right)] = node
            else: self.kd_tree = node
            # Recursively call the function for the left and right children.
            # Right
            X_r = X_c[m+1:].copy()
            area_r = area.copy()
            area_r[2*d] = x_m[d]
            # Left
            X_l = X_c[:m].copy()
            area_l = area.copy()
            area_l[2*d+1] = x_m[d]
            # Recursive call
            self.build_kd_tree(X_l, node, False, (d+1)%len(x_m), False, area_l)
            self.build_kd_tree(X_r, node, True, (d+1)%len(x_m), False, area_r)
    def knn(self, x, k, method="Force"):
        """ Compute the k nearest neighbors of a given element.
        INPUT:
            x: Element.
            k: Number of neighbors.
            method: Method used to compute the k neighbors. It can only
                take the values "Force" and "KD-Tree". """
        # Compute the k neighbors by brute force.
        if method == "Force":
            # Store the distances and the corresponding neighbors.
            distances, neighbors = [], []
            # For every element of the dataset.
            for x_i in self.X:
                # Compute the distance.
                dist = self.d(x, x_i)
                find = False
                # Following the idea of insertion sort, insert the element according to the
                # distance obtained.
                for i in range(len(distances)):
                    if distances[i] > dist:
                        A, B = distances[:i].copy(), neighbors[:i].copy()
                        A.append(dist)
                        B.append(x_i)
                        A.extend(distances[i:])
                        B.extend(neighbors[i:])
                        distances, neighbors = A.copy(), B.copy()
                        find = True
                        break
                if not find:
                    distances.append(dist)
                    neighbors.append(x_i)
            # Return the first k elements, which are the ones with the smallest distances.
            return neighbors[:k]
        # Compute the k neighbors using the KD-Tree.
        elif method == "KD-Tree":
            # Here we store the k smallest distances.
            self.min_d = [inf]*k
            # And here the k neighbors.
            self.neighbors = [None]*k
            self.knn_kd_tree(x, self.kd_tree, 0)
            neighbors = self.neighbors
            # Make sure these attributes are removed afterwards.
            self.min_d = None
            self.neighbors = None
            return neighbors
    def knn_kd_tree(self, x, node, d):
        # Check whether the point lies outside the hypercube defined by the current node.
        if not all(node.area[2*i] <= x[i] <= node.area[2*i+1] for i in range(len(x))):
            # For each dimension, check whether the point lies within the corresponding
            # faces of the hypercube.
            p = []
            for i in range(len(x)):
                # If not, store the coordinate of the point that lies outside the
                # face of the hypercube.
                if node.area[2*i] > x[i]: p.append(node.area[2*i])
                elif x[i] > node.area[2*i+1]: p.append(node.area[2*i+1])
                else: p.append(x[i])
            # Compute the distance between the point outside the hypercube and the
            # intersection of the corresponding faces. If it is greater than the largest of the
            # current smallest distances, then no point inside the hypercube
            # can be closer than any of the current k neighbors.
            dist = self.d(np.array(p), x)
            if dist >= self.min_d[-1]: return
        # Compute the distance between the point and the current root. Check whether it is smaller
        # than any of the current smallest distances.
        dist = self.d(x, node.x)
        for i in range(len(self.min_d)):
            if dist < self.min_d[i]:
                # Copy the first i current neighbors
                aux_d, aux_n = self.min_d[:i].copy(), self.neighbors[:i].copy()
                # Add the new one
                aux_d.append(dist)
                aux_n.append(node.x)
                # Add the remaining neighbors, except the last one, which is dropped.
                aux_d.extend(self.min_d[i:len(self.min_d)-1])
                aux_n.extend(self.neighbors[i:len(self.neighbors)-1])
                # Update.
                self.min_d, self.neighbors = aux_d, aux_n
                # Leave the for loop
                break
        # First visit the subtree whose condition the point satisfies, hoping that when the
        # second child is visited it can be discarded easily.
        # If neither condition holds, visit the left child first (if it is not null) and then the right one.
        if x[d] <= node.area[2*d+1] and node.childs[0] != None:
            self.knn_kd_tree(x, node.childs[0], (d+1)%len(x))
            if node.childs[1] != None:
                self.knn_kd_tree(x, node.childs[1], (d+1)%len(x))
        elif x[d] >= node.area[2*d] and node.childs[1] != None:
            self.knn_kd_tree(x, node.childs[1], (d+1)%len(x))
            if node.childs[0] != None:
                self.knn_kd_tree(x, node.childs[0], (d+1)%len(x))
        elif node.childs[0] != None:
            self.knn_kd_tree(x, node.childs[0], (d+1)%len(x))
            if node.childs[1] != None:
                self.knn_kd_tree(x, node.childs[1], (d+1)%len(x))
        elif node.childs[1] != None:
            self.knn_kd_tree(x, node.childs[1], (d+1)%len(x))
    def radius_neighbors(self, x, r):
        # Here we store the neighbors
        self.neighbors = []
        self.r_neighbors(x, self.kd_tree, 0, r)
        neighbors = self.neighbors
        # Make sure this attribute is removed afterwards.
        self.neighbors = None
        return neighbors

    def r_neighbors(self, x, node, d, r):
        # Check whether the point lies outside the hypercube defined by the current node.
        if not all(node.area[2*i] <= x[i] <= node.area[2*i+1] for i in range(len(x))):
            # For each dimension, check whether the point lies within the corresponding
            # faces of the hypercube.
            p = []
            for i in range(len(x)):
                # If not, store the coordinate of the point that lies outside the
                # face of the hypercube.
                if node.area[2*i] > x[i]: p.append(node.area[2*i])
                elif x[i] > node.area[2*i+1]: p.append(node.area[2*i+1])
                else: p.append(x[i])
            # Compute the distance between the point outside the hypercube and the
            # intersection of the corresponding faces. If it is greater than the radius, there is no need
            # to keep checking this branch.
            dist = self.d(np.array(p), x)
            if dist > r: return
        # Compute the distance between the point and the current root. Check whether it is smaller
        # than the radius.
        dist = self.d(x, node.x)
        if dist < r: self.neighbors.append(node.x)
        # First visit the subtree whose condition the point satisfies, hoping that when the
        # second child is visited it can be discarded easily.
        # If neither condition holds, visit the left child first (if it is not null) and then the right one.
        if x[d] <= node.area[2*d+1] and node.childs[0] != None:
            self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)
            if node.childs[1] != None:
                self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
        elif x[d] >= node.area[2*d] and node.childs[1] != None:
            self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
            if node.childs[0] != None:
                self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)
        elif node.childs[0] != None:
            self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)
            if node.childs[1] != None:
                self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
        elif node.childs[1] != None:
            self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)
###Output
_____no_output_____
###Markdown
**Reading the Data**
###Code
# Generate the data and plot it.
X = []
for i in range(1000):
    X.append(np.array([randint(-100, 100), randint(-100, 100)]))

# Add the points to the plot
plt.plot([x[0] for x in X], [x[1] for x in X], '.', c='b')
# Show the plot
plt.show()
###Output
_____no_output_____
###Markdown
**Results** **Brute force**
###Code
knn = KNN(X)
x = np.array([randint(-100, 100), randint(-100, 100)])
k = randint(1, 20)
neighbors = knn.knn(x, k)
plt.figure(figsize=(10,10))
plt.xlim(-100, 100)
plt.ylim(-100, 100)
# Add the points to the plot
plt.plot([x[0] for x in X], [x[1] for x in X], 'o', c='b')
plt.plot([n[0] for n in neighbors],
[n[1] for n in neighbors], 'o', c='r')
plt.plot([x[0]], [x[1]], 'x', c='r')
# Show the plot
plt.show()
###Output
_____no_output_____
###Markdown
**KD-Tree**
###Code
knn.build_kd_tree()
x = np.array([randint(-100, 100), randint(-100, 100)])
k = randint(1, 20)
neighbors = knn.knn(x, 10, method="KD-Tree")
plt.figure(figsize=(10,10))
plt.xlim(-100, 100)
plt.ylim(-100, 100)
# Add the points to the plot
plt.plot([x[0] for x in X], [x[1] for x in X], 'o', c='b')
plt.plot([n[0] for n in neighbors], [n[1] for n in neighbors], 'o', c='r')
plt.plot([x[0]], [x[1]], 'x', c='r')
# Show the plot
plt.show()
###Output
_____no_output_____
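###Markdown
As a quick consistency check (added for illustration): with distinct random points, brute force and the KD-Tree should return the same set of neighbors for the same query, possibly in a different order.
###Code
# Compare both methods on the same query point used above
# (may print False in rare tie cases at the k-th distance)
brute = knn.knn(x, 10, method="Force")
tree = knn.knn(x, 10, method="KD-Tree")
print(sorted(tuple(n) for n in brute) == sorted(tuple(n) for n in tree))
###Output
_____no_output_____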
###Markdown
**Neighbors within a radius**
###Code
knn.build_kd_tree()
x = np.array([randint(-100, 100), randint(-100, 100)])
neighbors = knn.radius_neighbors(x, 50)
plt.figure(figsize=(10,10))
plt.xlim(-100, 100)
plt.ylim(-100, 100)
# Add the points to the plot
plt.plot([x[0] for x in X], [x[1] for x in X], 'o', c='b')
plt.plot([n[0] for n in neighbors], [n[1] for n in neighbors], 'o', c='r')
plt.plot([x[0]], [x[1]], 'x', c='r')
# Show the plot
plt.show()
###Output
_____no_output_____ |
src/notebooks/101-make-a-color-palette-with-seaborn.ipynb | ###Markdown
Welcome to the introductory template of the Python Graph Gallery. Here is how to proceed to add a new `.ipynb` file that will be converted to a blogpost in the gallery! Notebook Metadata It is very important to add the following fields to your notebook. It helps building the page later on:- **slug**: the URL of the blogPost. It should be exactly the same as the file title. Example: `70-basic-density-plot-with-seaborn`- **chartType**: the chart type like density or heatmap. For a complete list see [here](https://github.com/holtzy/The-Python-Graph-Gallery/blob/master/src/util/sectionDescriptions.js), it must be one of the `id` options.- **title**: what will be written in big on top of the blogpost! use html syntax there.- **description**: what will be written just below the title, centered text.- **keyword**: list of keywords related with the blogpost- **seoDescription**: a description for the blogpost meta. Should be a bit shorter than the description and must not contain any html syntax. Add a chart description A chart example always comes with some explanation. It must: contain keywords, link to related pages like the parent page (graph section), and give explanations (in depth for complicated charts, high level for beginner-level charts). Add a chart
###Code
import seaborn as sns, numpy as np
np.random.seed(0)
x = np.random.randn(100)
ax = sns.distplot(x)
###Output
_____no_output_____
###Markdown
You can choose color palettes in [seaborn](http://python-graph-gallery.com/seaborn/) plots. There are 3 categories of color palettes: **sequential**, **discrete** and **diverging**. You can find explanations and examples for each category in the following sections. Sequential **Sequential** color palettes are appropriate when you are mapping values from relatively low to high or from high to low. In order to set the colors to move from lighter to darker in a sequential color palette, you should use the `palette` parameter in your plot function. If you want the reverse order of colors (darker to lighter), you can simply add the suffix "_r" to the color of your choice.
###Code
# Libraries
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# create data
x = np.random.rand(80) - 0.5
y = x+np.random.rand(80)
z = x+np.random.rand(80)
df = pd.DataFrame({'x':x, 'y':y, 'z':z})
# Plot with palette
sns.lmplot( x='x', y='y', data=df, fit_reg=False, hue='x', legend=False, palette="Blues")
plt.show()
# reverse palette
sns.lmplot( x='x', y='y', data=df, fit_reg=False, hue='x', legend=False, palette="Blues_r")
plt.show()
###Output
_____no_output_____
###Markdown
Diverging **Diverging** color palettes are appropriate when high and low values are **equally important** in your dataset. Diverging colors are composed of 2 contrasting colors: darker on the edges and lighter in the center. You can use the reverse order of colors by adding the suffix "_r" to the color of your choice.
###Code
# Libraries
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# create data
x = np.random.rand(80) - 0.5
y = x+np.random.rand(80)
z = x+np.random.rand(80)
df = pd.DataFrame({'x':x, 'y':y, 'z':z})
# plot
sns.lmplot( x='x', y='y', data=df, fit_reg=False, hue='x', legend=False, palette="PuOr")
plt.show()
# reverse palette
sns.lmplot( x='x', y='y', data=df, fit_reg=False, hue='x', legend=False, palette="PuOr_r")
plt.show()
###Output
_____no_output_____
###Markdown
Discrete You can control the colors using the `set_palette()` function of seaborn. It is possible to give a list of colors you want to use in your plots as a parameter to the `set_palette()` function.
###Code
# library & dataset
import seaborn as sns
import matplotlib.pyplot as plt
df = sns.load_dataset('iris')
# use the 'palette' argument of seaborn
sns.lmplot( x="sepal_length", y="sepal_width", data=df, fit_reg=False, hue='species', legend=False, palette="Set1")
plt.legend(loc='lower right')
plt.show()
# use a handmade palette
flatui = ["#9b59b6", "#3498db", "orange"]
sns.set_palette(flatui)
sns.lmplot( x="sepal_length", y="sepal_width", data=df, fit_reg=False, hue='species', legend=False)
plt.show()
###Output
_____no_output_____ |
Solid State Physics/Homework_1_Problem_1.ipynb | ###Markdown
Homework 1 Problem 1
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Load data
data = pd.read_csv("./data/resistivity_table.csv", delimiter = "\t", header = 0)
data
#Class for storing data
class Element:
def __init__(self, name, density, lattice_constant, resistivity_list, temperature_list):
self.name = name
self.density = density
self.lattice_constant = lattice_constant
self.resistivity = {}
if len(resistivity_list) == len(temperature_list):
for i in range(len(resistivity_list)):
self.resistivity[temperature_list[i]] = resistivity_list[i]
#Units
density_units = 10 ** 28 #[m^-3]
lattice_units = 10 ** (-12) # [m]
resistivity_units = 10 ** (-8) # [ohm * m]
temperatures = [77, 273]
ag = Element('Ag', data['density'][0], data['lattice_constant'][0],
[data['resistivity_77'][0], data['resistivity_273'][0]], temperatures)
au = Element('Au', data['density'][1], data['lattice_constant'][1],
[data['resistivity_77'][1], data['resistivity_273'][1]], temperatures)
al = Element('Al', data['density'][2], data['lattice_constant'][2],
[data['resistivity_77'][2], data['resistivity_273'][2]], temperatures)
ag.__dict__
elements = [ag, au, al]
###Output
_____no_output_____
###Markdown
Basic Theory Ohm's law:\begin{equation}\vec{j} = \sigma \vec{E}\end{equation}In the Drude model, the electrical conductivity is given as:\begin{equation}\sigma = \frac{ne^2\tau}{m}\end{equation} Statement of problem Using the Drude model, calculate the scattering time and the mean free path of electrons. Formulas Using the electrical conductivity from the Drude model, we can obtain an expression for the scattering time:\begin{equation}\tau = \frac{m}{\rho n e^2}\end{equation}The mean free path is given by:\begin{equation}l = v \tau\end{equation}where $v$ can be estimated using the equation:\begin{equation}\overline{E_k} = \frac{1}{2}mv^2 = \frac{3}{2} kT\end{equation}So we get the expression for the mean free path:\begin{equation}l = \tau \sqrt{\frac{3kT}{m}}\end{equation}
###Code
from scipy.constants import e, k, m_e
def tau(density, resistivity):
return m_e / (e ** 2 * density * resistivity)
def mean_free_path(tau, temperature):
return tau * np.sqrt(3 * k * temperature / m_e)
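# Quick sanity check (illustrative values, not taken from the table): for Ag at 273 K,
# with n ~ 5.86e28 m^-3 and rho ~ 1.6e-8 ohm*m, the Drude scattering time should be
# on the order of 4e-14 s (cf. standard textbook values).
# print(tau(5.86e28, 1.6e-8))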
results_dict = {}
for element in elements:
    results_dict[element] = {}
    results_dict[element]['element'] = element.name
    for temperature in temperatures:
        # convert the table values to SI units before applying the Drude formulas
        results_dict[element]['tau_' + str(temperature)] = tau(element.density * density_units,
                                                               element.resistivity[temperature] * resistivity_units)
        results_dict[element]['mfp_' + str(temperature)] = \
            mean_free_path(results_dict[element]['tau_' + str(temperature)], temperature)
results_dict
results = pd.DataFrame(results_dict)
for i in range(len(elements)):
results = results.rename(index = str, columns = {elements[i]: i})
results
###Output
_____no_output_____ |
testing/.ipynb_checkpoints/Expansion_Theory_Exper-checkpoint.ipynb | ###Markdown
Import libs
###Code
import sys
import os
import itertools
sys.path.append('..')
from eflow.foundation import DataPipeline,DataFrameTypes
from eflow.model_analysis import ClassificationAnalysis
from eflow.utils.modeling_utils import optimize_model_grid
from eflow.utils.eflow_utils import get_type_holder_from_pipeline, remove_unconnected_pipeline_segments
from eflow.utils.sys_utils import load_pickle_object
from eflow.utils.pandas_utils import data_types_table
from eflow.auto_modeler import AutoCluster
from eflow.data_pipeline_segments import DataEncoder
import pandas as pd
import numpy as np
from dtreeviz.trees import *
from functools import partial
import scikitplot as skplt
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import copy
import pickle
import time
import math
import multiprocessing as mp
from functools import partial
from scipy import stats
from IPython.display import clear_output
%matplotlib notebook
%matplotlib inline
###Output
_____no_output_____
###Markdown
Declare Project Variables Interaction required
###Code
dataset_path = "Datasets/titanic_train.csv"
# -----
dataset_name = "Titanic Data"
pipeline_name = "Titanic Pipeline"
# -----
# -----
notebook_mode = True
###Output
_____no_output_____
###Markdown
Clean out segment space
###Code
remove_unconnected_pipeline_segments()
###Output
_____no_output_____
###Markdown
Import dataset
###Code
df = pd.read_csv(dataset_path)
shape_df = pd.DataFrame.from_dict({'Rows': [df.shape[0]],
'Columns': [df.shape[1]]})
display(shape_df)
display(df.head(30))
data_types_table(df)
###Output
_____no_output_____
###Markdown
Loading and init df_features
###Code
# Option: 1
# df_features = get_type_holder_from_pipeline(pipeline_name)
# Option: 2
df_features = DataFrameTypes()
df_features.init_on_json_file(os.getcwd() + f"/eflow Data/{dataset_name}/df_features.json")
df_features.display_features(display_dataframes=True,
notebook_mode=notebook_mode)
###Output
_____no_output_____
###Markdown
Any extra processing before eflow DataPipeline Setup pipeline structure Interaction Required
###Code
main_pipe = DataPipeline(pipeline_name,
df,
df_features)
main_pipe.perform_pipeline(df,
df_features)
df.drop(columns=[df_features.target_feature()],
inplace=True)
df_features.remove_feature(df_features.target_feature())
###Output
_____no_output_____
###Markdown
Declare Variables
###Code
qualitative_features=list(df_features.get_dummy_encoded_features().keys())
require_binning = df_features.continuous_numerical_features()
require_binning
require_binning -= {'Parch', 'SibSp'}
require_binning
df
data_encoder = DataEncoder(create_file=False)
data_encoder.revert_dummies(df,
df_features,
qualitative_features=qualitative_features)
df
data_encoder.apply_binning(df,
df_features,
require_binning)
df
# Collect, per feature, the list of observed values; their cartesian product below
# forms a synthetic "expansion" dataset covering the whole discretized feature space.
value_matrix = []
for feature_name in df.columns:
    value_matrix.append(df[feature_name].dropna().unique().tolist())
feature_columns = copy.deepcopy(df.columns)
del df
df = pd.DataFrame(list(itertools.product(*value_matrix)), columns=feature_columns)
df
data_encoder.make_dummies(df,
df_features,
qualitative_features=qualitative_features)
df
# Replace each bin label with the midpoint of its bin edges so the features are numeric again
for feature_name, bins_labels in df_features.get_all_feature_binning().items():
    bins = bins_labels["bins"]
    labels = bins_labels["labels"]
    label_bin_avg = dict()
    for i,l in enumerate(labels):
        label_bin_avg[l] = sum(bins[i:i+2])/2
    df.replace(label_bin_avg,inplace=True)
df
###Output
_____no_output_____
###Markdown
Load Model
###Code
model = load_pickle_object("/Users/ericcacciavillani/Desktop/Coding/Python_Files/Artificial_Intelligence/Data Mining/eFlow/testing/eflow Data/Titanic Data/Classification Analysis/Target Feature: Survived/RandomForestClassifier/RandomForestClassifier.pkl")
model_predictions = model.predict(df)
# Sorted list (not a set) so that the class order is deterministic when mapping labels back
predicted_classes = sorted(set(model_predictions.tolist()))
cluster_results = [None for i in range(0,len(model_predictions))]
cluster_label_buffer = 0
cluster_cut_off_points = []
for target in predicted_classes:
print(f"target={target}")
bool_array = model_predictions == target
auto_cluster = AutoCluster(df[bool_array],
                               dataset_name=f"Expansion Auto Clustering/Clustering/Target={target}",
project_sub_dir=dataset_name,
overwrite_full_path=None,
notebook_mode=True,
pca_perc=.8)
best_clusters = auto_cluster.create_elbow_models(model_names=["K-Means"],
sequences=10,
max_k_value=11,
display_visuals=True)
cluster_model = load_pickle_object(auto_cluster.folder_path + f"Models/K-Means/Clusters={best_clusters[1]}/K-Means_Clusters={best_clusters[1]}.pkl")
tmp_results = cluster_model.predict(auto_cluster.apply_clustering_data_pipeline(df[bool_array]))
    # Write each point's cluster label back into the full-length results list,
    # offset so that labels stay unique across the target classes.
    cluster_count = 0
    for i,replace_val in enumerate(bool_array):
        if replace_val:
            cluster_results[i] = tmp_results[cluster_count] + cluster_label_buffer
            cluster_count += 1
    cluster_label_buffer += best_clusters[1]
    # Remember where each class's block of cluster labels ends (used to map back later).
    if len(cluster_cut_off_points):
        cluster_cut_off_points.append(cluster_cut_off_points[-1] + best_clusters[1])
    else:
        cluster_cut_off_points.append(best_clusters[1])
df["Target"] = cluster_results
df
###Output
_____no_output_____
###Markdown
Separate out data into train and test sets
###Code
feature_order = list(df.columns)
feature_order.remove("Target")
feature_order
X = df.drop(columns="Target").values
y = df["Target"].values
del df
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.3,
random_state=517,
stratify=y,
)
del X
del y
# Find best parameters for model
param_grid = {
"criterion": ["entropy", "gini"],
"max_depth": [4,5,6,7,8,9,10,11,12,13,14,15,16],
"min_samples_split": [2,5,10,15,30,40,50,60,70,80]
}
model, best_params = optimize_model_grid(
model=DecisionTreeClassifier(),
X_train=X_train,
y_train=y_train,
param_grid=param_grid,
scoring="f1_micro",
n_jobs=-1
)
model_name = repr(model).split("(")[0]
model_name
model_analysis = ClassificationAnalysis(dataset_name=dataset_name,
                                        project_sub_dir="Expansion Auto Clustering/Modeling for DT",
model=model,
model_name=model_name,
feature_order=feature_order,
target_feature=df_features.target_feature(),
pred_funcs_dict={"Probabilities function":model.predict_proba,
"Predict function":model.predict},
sample_data=X_train[0],
notebook_mode=notebook_mode,
df_features=df_features)
model_analysis.perform_analysis(X=X_train,
y=y_train,
dataset_name="Train Data",
classification_error_analysis=False,
classification_correct_analysis=False)
model_analysis.perform_analysis(X=X_test,
y=y_test,
dataset_name="Test Data",
classification_error_analysis=False,
classification_correct_analysis=False)
viz = dtreeviz(model,
X_test,
y_test,
target_name='Target',
feature_names=feature_order,
class_names=model.classes_.tolist())
viz.view()
cluster_cut_off_points
def expansion_prediction(model,
                         predicted_classes,
                         cluster_cut_off_points,
                         data):
    # Predict the fine-grained cluster labels, then map each label back to the
    # original class whose block of cluster labels it falls into.
    y_preds = model.predict(data)
    for i,_ in enumerate(y_preds):
        for label_index,label in enumerate(cluster_cut_off_points):
            if y_preds[i] < label:
                y_preds[i] = list(predicted_classes)[label_index]
                break
    return y_preds
dt_prediction = partial(expansion_prediction,
model,
predicted_classes,
cluster_cut_off_points)
del X_train
del X_test
del y_train
del y_test
###Output
_____no_output_____
###Markdown
Re-import dataset
###Code
df = pd.read_csv(dataset_path)
shape_df = pd.DataFrame.from_dict({'Rows': [df.shape[0]],
'Columns': [df.shape[1]]})
display(shape_df)
display(df.head(30))
data_types_table(df)
###Output
_____no_output_____
###Markdown
Loading and init df_features
###Code
# Option: 1
# df_features = get_type_holder_from_pipeline(pipeline_name)
# Option: 2
df_features = DataFrameTypes()
df_features.init_on_json_file(os.getcwd() + f"/eflow Data/{dataset_name}/df_features.json")
df_features.display_features(display_dataframes=True,
notebook_mode=notebook_mode)
###Output
_____no_output_____
###Markdown
Any extra processing before eflow DataPipeline Setup pipeline structure Interaction Required
###Code
main_pipe = DataPipeline(pipeline_name,
df,
df_features)
main_pipe.perform_pipeline(df,
df_features)
df
feature_order = list(df.columns)
feature_order
X = df.drop(columns=df_features.target_feature()).values
y = df[df_features.target_feature()].values
del df
model_analysis = ClassificationAnalysis(dataset_name=dataset_name,
                                        project_sub_dir="Expansion Auto Clustering/Modeling for DT/Actual results",
model=model,
model_name=model_name,
feature_order=feature_order,
target_feature=df_features.target_feature(),
pred_funcs_dict={"Predict function":dt_prediction},
sample_data=X[0],
notebook_mode=notebook_mode,
df_features=df_features)
model_analysis.perform_analysis(X=X,
y=y,
dataset_name="All Data",
classification_error_analysis=False,
classification_correct_analysis=False)
feature_order.remove(df_features.target_feature())
from sklearn.tree import export_text
dtree_node_level_cond = dict()
for tree_info in export_text(model,feature_names=feature_order).split("\n"):
conditional_value = tree_info.split("|--- ")[1]
depth_level = len(tree_info.split("| ")) - 1
dtree_node_level_cond[depth_level] = conditional_value
if conditional_value[0:7] == "class: ":
for i_level in range(0,depth_level):
print(dtree_node_level_cond[i_level])
target_val = int(dtree_node_level_cond[depth_level].split("class: ")[1])
for i,label in enumerate(cluster_cut_off_points):
if target_val < label:
print(f"class: {i}")
break
print()
###Output
_____no_output_____ |
Prototype Notebook/.ipynb_checkpoints/Max Project-checkpoint.ipynb | ###Markdown
Plotting raw data
###Code
carbonates.Plot.plot_data(direction="z")
# Learn about API authentication here: https://plot.ly/pandas/getting-started
# Find your api_key here: https://plot.ly/settings/api
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
df = carbonates.Data.Interfaces
df.head()
data = []
clusters = []
#colors = ['rgb(228,26,28)','rgb(55,126,184)','rgb(77,175,74)']
for i in range(len(df['formation'].unique())):
name = df['formation'].unique()[i]
x = df[ df['formation'] == name ]['X']
y = df[ df['formation'] == name ]['Y']
z = df[ df['formation'] == name ]['Z']
trace = dict(
name = name,
x = x, y = y, z = z,
type = "scatter3d",
mode = 'markers',
marker = dict( size=3, line=dict(width=0) ) )
data.append( trace )
layout = dict(
width=800,
height=550,
autosize=False,
title='MAx dataset',
scene=dict(
xaxis=dict(
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
yaxis=dict(
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
zaxis=dict(
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
aspectratio = dict( x=1, y=1, z=0.2 ),
aspectmode = 'automatic'
),
)
fig = dict(data=data, layout=layout)
# IPython notebook
py.iplot(fig, filename='Max data', validate=False)
carbonates.set_interpolator(u_grade = 2)
# Reset the block
carbonates.Interpolator.block.set_value(np.zeros_like(carbonates.Grid.grid[:,0]))
# Compute the block
carbonates.Interpolator.compute_block_model([0], verbose = 1)
np.unique(carbonates.Interpolator.block.get_value())
carbonates.Plot.plot_block_section(direction="x", aspect ="auto" )
carbonates.Plot.plot_potential_field(50, direction="y")
carbonates.Interpolator.a_T.get_value()
import plotly.tools as tls
tls.set_credentials_file(username='leguiom', api_key='hcdlNUAEuGC0m2rRy1pq')
Max = pn.read_excel('../input_data/gypsumDec4.xlsx')
Max = Max[['East','North','How']]
Max = Max.dropna()
import scipy
x = np.linspace(Max['East'].min(), Max['East'].max()+10000, 100)
y = np.linspace(Max["North"].min(), Max["North"].max()+10000, 100)
xi, yi = np.meshgrid(x, y)
# Interpolate
rbf = scipy.interpolate.Rbf(Max['East'], Max['North'], Max['How'], function='linear')
zi = rbf(xi, yi)
np.meshgrid?
scipy.interpolate.Rbf?
%matplotlib notebook
plt.imshow(zi, extent=[x.min(), x.max(), y.min(), y.max()], cmap = 'viridis')
plt.contour(xi, yi, zi, 10, cmap = 'viridis')
plt.colorbar()
plt.scatter(Max['East'], Max["North"], c='white', s = 60)
# Calculation of the spherical covariance, given the matrix of lag distances x.
# You can use verbose = 1 to see the intermediate steps.
def cov_spherical(x,r, C_o = 1, verbose = 0):
    """x = Array: matrix of lag distances between the measured points
    r = Range of the spherical semivariogram
    C_o = Nugget, variance
    """
    # Creating the lag matrix from raw positions (kept for reference):
    # i, j = np.indices((len(x),len(x)))
    # h = np.zeros((len(x),len(x)))
    # for l in range(len(x)):
    #     h[i==l] = abs(x[l]-x)
    h = x
# Initializing
C_h = np.ones_like(h)
    # Applying the function
C_h = (h<r)*( C_o*(1-1.5*(h/r)+0.5*(h/r)**3))
if verbose !=0:
print ("Our lag matrix is")
print (h)
print( "Our covariance matrix is")
print (C_h)
return C_h
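# Quick endpoint check of the spherical model (added for illustration):
# C(0) should equal C_o and C(h >= r) should be 0.
# print(cov_spherical(np.array([0.0, 4.5]), r=4.5))  # expect [1., 0.]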
def Krigin2(x, x_pos = np.array([1.,4.,5.]) , por = np.array([0.14, 0.19, 0.15]), mu = False, r = 4.5, C_o = 0.005,verbose = 0):
if mu == False:
mu = np.mean(por)
#Lag:
Y = (por-mu)
# Covariance matrix:
C_h = cov_spherical(x_pos, r, C_o, verbose = verbose)
a = len(C_h)
C_h = np.hstack((C_h, np.ones((a, 1))))
C_h = np.vstack((C_h, np.ones(a+1)))
C_h[-1,-1] = 0
# "Interpolation point":
b = np.zeros(len(x_pos))
dist = abs(x-x_pos)
b = (dist<r)*C_o*(1-1.5*(dist/r)+0.5*(dist/r)**3)
b = np.append(b,1)
# Solving the system
lam = np.linalg.solve(C_h, b)
sol = sum(por*lam[:-1])
plt.plot(x_pos,por,'o', c = "r")
# Calculate the variance
var = C_o - np.sum(lam[:-1]*b[:-1]) + lam[-1]
if verbose != 0:
print ("weight", lam)
print (lam*b)
print ("mean solution", sol)
print ("variance solution", var)
plt.xlim(0,6)
plt.ylim(0.1,0.3)
    plt.axvline(x, c = 'r', ls = ':')
plt.title("Value at the position of the red line?")
plt.xlabel("x")
plt.ylabel("Porosity")
return sol, var
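# A minimal usage sketch of the 1D kriging routine above (illustrative values only):
# estimate the porosity at position x = 2.0 from the three default well positions.
sol_example, var_example = Krigin2(2.0, verbose=1)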
def euclidian_distances(points):
    # Pairwise distances via the identity ||a-b||^2 = ||a||^2 + ||b||^2 - 2*a.b
    return (np.sqrt((points ** 2).sum(1).reshape((points.shape[0], 1)) +
                    (points ** 2).sum(1).reshape((1, points.shape[0])) -
                    2 * points.dot(points.T)))

def euclidian_distances2(wells, grid):
    # Distances between every well location and every grid point (same identity as above)
    return (np.sqrt(
        (wells ** 2).sum(1).reshape((wells.shape[0], 1)) +
        (grid ** 2).sum(1).reshape((1, grid.shape[0])) -
        2 * wells.dot(grid.T)))
grid_x = np.linspace(x.min(), x.max(),100)
grid_y = np.linspace(y.min(), y.max(),100)
grid = np.vstack((grid_x,grid_y))
grid.T;
ED = euclidian_distances(Max[['East', 'North']])
ED = np.nan_to_num(ED.values)
A = cov_spherical(ED, r = 50000)
A = np.vstack((A, np.ones_like(A[0])))
A = np.hstack((A, np.ones(len(A)).reshape(len(A),1)))
b = cov_spherical(euclidian_distances2(Max[['East', 'North']], grid.T).values, r = 50000)
b = np.vstack((b, np.ones_like(b[0])))
lam = np.linalg.solve(A,b)
sol = (lam[:-1,:].T * Max['How'].values).sum(axis = 1)
plt.contour((grid[0,:], grid[1,:]), sol)
grid[0,:].shape
plt.contourf?
###Output
_____no_output_____ |
2021/04 - Single Neuron Models/Single_neuron.ipynb | ###Markdown
Introduction to Computational Neuroscience Practice IV: Single Neuron Models Aqeel Labash, Daniel Majoral, Raul Vicente Important:Make sure that you saved your ipynb file correctly to avoid loss of information. Please submit this **ipynb** file only (unless you have extra files then zip this file with the extra ones). Everything should be included in this file questions, answers, codes, plots, and comments about your solutions. My **Pseudonym** is: [YOUR ANSWER] and it took me approximately: [YOUR ANSWER] hours to complete the home work.The data of how long it took you to complete the home work will help us to improve the home works to be balanced. 1.IntroductionIn this session we will have a brief look on three different computational models of a neuron: McCulloch-Pitts, Intergrate-and-Fire and Hodgkin-Huxley. *** Exercise 1: Logic gates (1)On the lecture we have seen how to construct `AND`, `OR` and `NOT` logic gates using the the McCulloch-Pitts model of a neuron. Remember that:``` if sum(w.*x) <0, output is 0 if sum(w.*x)>=0, output is 1 (notice that 0 is included)```**Your task is** to construct more logical operations using MP neurons. Please construct the following two gates (create a function with fixed weights that take input $x,y$ and output $O$ which represent the required output):1. NAND
###Code
x = [1,0,1,0]
y = [0,0,1,1]
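# For reference, a minimal sketch (not from the lecture slides) of an AND gate built
# from a single MP neuron: weights 1 and 1, plus a constant bias input weighted -1.5,
# so the unit outputs 1 iff x + y - 1.5 >= 0.
def AND(x,y):
    return [int(xi + yi - 1.5 >= 0) for xi, yi in zip(x, y)]
print(AND(x,y))  # expected: [0, 0, 1, 0]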
def NAND(x,y):
################################
##### YOUR CODE STARTS HERE ####
assert False,'function not implemented yet'
##### YOUR CODE ENDS HERE #####
################################
return results
print(NAND(x,y))
###Output
_____no_output_____
###Markdown
2. XOR**HINT 1:** For the `XOR` gate you will need more than one neuron.**HINT 2:** Same input can go simultaneously to several neurons (and with different weights).**HINT 3:** You can see how `XOR`,`NAND` behave in [https://en.wikipedia.org/wiki/Logic\_gate](https://en.wikipedia.org/wiki/Logic\_gate).
###Code
def XOR(x,y):
################################
##### YOUR CODE STARTS HERE ####
assert False,'function not implemented yet'
##### YOUR CODE ENDS HERE #####
################################
return results
print(XOR(x,y))
###Output
_____no_output_____
###Markdown
*** Exercise 2: Integrate and Fire neuron model (2.5 pt) An Integrate-and-Fire neuron (IAF neuron) accumulates voltage until it reaches the *threshold*. After that it outputs/emits a spike and resets the voltage back to the reset value. In this exercise we will model the behaviour of IAF neurons and study their properties. We will add features and behaviours to our model step by step. You need to complete these TODO-s one by one in the given order and report **ALL** figures, answers, interpretations and conclusions you will make during the work. Figure 1: 10 trials of data generated using the Integrate-and-Fire neuron model. Adopted from http://math.bu.edu/people/mak/MA665 course materials. For each TODO: complete the code, plot and report a figure, give an interpretation, etc. **THEORY:** How do we compute a *numerical solution* to the integrate and fire model? In course materials we had: $C_{membrane}(dV/dt)= I_{Total}$, where $I_{Total}$ is the sum of $I_l$ (leak current) and $I_{input}$ (current injected into the cell). For solving this equation numerically we first notice that $dV$ is the change in $V$ during the time period $dt$, so for any moment in time t: $dV = V(t+dt)-V(t)$. For clarity, from here onwards we will count time in units equal to dt, so time t+3 means t+3*dt: $dV = V(t+1)-V(t)$. From there we rearrange the differential equation to get the new voltage $V(t+1)$ on the left-hand side and $V(t)$ on the right-hand side. This means that if we know what's happening at time t, we can calculate what's happening at time $t+1$. Then knowing what happened at $t+1$, we can find $t+2$ and so on. We will calculate V for consecutive timepoints separated by dt. The differential equation for Integrate-and-Fire will become $V(t+1) = V(t) + dt*(I/C_m)$. We can use that to predict what the voltage will be at the next moment of time.
###Code
# Adding libraries and some backbone functions
import numpy
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
%matplotlib inline
def Plot_line(data,title,xlabel,ylabel):
plt.figure()
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.plot(data)
plt.show()
# First, let's set the values for the parameters I (total input current) and C (capacity of the membrane).
# Notice we are not talking about units, because it is the relative proportion of I and C that matters;
# this is a common practice, not our laziness.
I = 3.0
C = 1.0
# We also need to set the value for dt. This defines the time step we take
# when numerically solving the problem.
# If dt is too small we will need to do too many calculations.
# If dt is too big, say 10 ms, our result is not precise and does not reflect reality, because the changes
# in membrane potential actually happen a lot faster.
# If you are interested, try a bigger dt too and see how it messes up the system, but don't report it.
# We'll choose (in seconds):
dt = 0.001
# we need to set an initial value, so let the voltage at time 0 be -70 mV
V_init= -70.0
#Time in seconds
T=1
#threshold above which the neuron spikes
Vth= -55.0
noise = 1.0
V = [0.0] * int(T/dt)
V[0] = V_init
###Output
_____no_output_____
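###Markdown
Before the TODOs, here is a minimal sketch of the forward-Euler idea on a simpler, known equation, dy/dt = -y (exponential decay). The same update pattern, y(t+1) = y(t) + dt*f(y(t)), is what you will apply to the membrane voltage below. (This example is added for illustration; the values are arbitrary.)
###Code
# Forward Euler on dy/dt = -y, whose exact solution is y(t) = y0*exp(-t)
y = [1.0]                                 # initial condition y0
for step in range(1, int(T/dt)):
    y.append(y[-1] + dt * (-y[-1]))       # y(t+1) = y(t) + dt*f(y(t))
print(y[-1], np.exp(-T))                  # the two values should be close for small dt
###Output
_____no_output_____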
###Markdown
Now that we have an equation and the value at time 0, we can calculate the values at times 1 and 2. 1. TODO 1: write how to calculate V(1), given V(0), I and C
###Code
################################
##### YOUR CODE STARTS HERE ####
# Append V1 to list
V[1] = ???
##### YOUR CODE ENDS HERE #####
################################
print(V[1])
###Output
_____no_output_____
###Markdown
2. TODO 2: write how to calculate V(2), given V(1), I and C
###Code
################################
##### YOUR CODE STARTS HERE ####
#Append V2 to list
V[2]= ???
##### YOUR CODE ENDS HERE #####
################################
print(V[2])
###Output
_____no_output_____
###Markdown
Let's say that we want to simulate not just 2 or 3 time steps, but a longer period of time, say 1 second. For that we need to take 1000 steps (since our dt is 0.001 sec).
###Code
T = 1
C = 0.1
number_of_steps = T/dt
###Output
_____no_output_____
###Markdown
3. TODO 3: write how to calculate V(t+1), given V(t), I and C. Plot and describe what happens.
###Code
def Calculate_V(old_v, I,C,dt):
################################
##### YOUR CODE STARTS HERE ####
assert False,'function not implemented yet'
New_V = ???
##### YOUR CODE ENDS HERE #####
################################
return New_V
number_of_spikes=0
for timestep in range(1, int(T/dt)):
V[timestep] = Calculate_V(V[timestep - 1], I, C, dt)
if False:
V[timestep]=V_init
number_of_spikes+=1
############################################
##### Don't forget to change the labels ####
Plot_line(V,'Figure title','Figure x label', 'Figure y label')
###Output
_____no_output_____
###Markdown
[Describe what happens] 4. TODO 4: replace the False with the conditional sentence to check if V(t) is above the threshold level Vth. Plot what happens. Describe and compare with TODO3.
###Code
number_of_spikes=0
for timestep in range(1, int(T/dt)):
V[timestep] = Calculate_V(V[timestep - 1], I, C, dt)
# your code start here
if False:
# your code end here
V[timestep]=V_init
number_of_spikes+=1
############################################
##### Don't forget to change the labels ####
Plot_line(V,'Figure title','Figure x label', 'Figure y label')
###Output
_____no_output_____
###Markdown
[Describe what happens and compare to previous task (3rd one)] 5. TODO 5: increase the current to 10.0. Plot the behaviour and report the count. Use youre code from previous block.
###Code
I=10
################################
##### YOUR CODE STARTS HERE ####
##### YOUR CODE ENDS HERE #####
################################
############################################
##### Don't forget to change the labels ####
Plot_line(V,'Figure title','Figure x label', 'Figure y label')
###Output
_____no_output_____
###Markdown
6. TODO 6: Try also other currents, observe the spike count (no need to plot Vm(t) for each current). Plot it in your head and/or on a graph. What is the relationship - exponential? logarithmic? linear? less than linear?
###Code
################################
##### YOUR CODE STARTS HERE ####
##### YOUR CODE ENDS HERE #####
################################
###Output
_____no_output_____
###Markdown
[What is the relationship - exponential? logarithmic? linear? less than linear?] 7. TODO 7: add the term `noise_amp*sqrt(dt)*np.random.randn()` to the V(t+1) equation. This input reflects the fact that neurons always receive random inputs from their neighbours. Observe the noise on Vm plot. Use your code from previous blocks to calculate V using `Calculate_V_with_noise`
###Code
I = 5.0
def Calculate_V_with_noise(old_v, I,C,dt,noise_amp):
################################
##### YOUR CODE STARTS HERE ####
assert False,'function not implemented yet'
New_V = ???
##### YOUR CODE ENDS HERE #####
################################
return New_V
number_of_spikes=0
for timestep in range(1, int(T/dt)):
V[timestep] = Calculate_V_with_noise(V[timestep - 1], I, C, dt,noise)
# You need to update according to TODO 4
if False:
V[timestep]=V_init
number_of_spikes+=1
print(number_of_spikes)
############################################
##### Don't forget to change the labels ####
Plot_line(V,'Figure title','Figure x label', 'Figure y label')
###Output
_____no_output_____
###Markdown
8. TODO 8: Your task is to change the noise level and plot 3 different raster plots (10 trials on each) with different noise levels. 1. No noise 2. Moderate noise: spiking pattern is somewhat recognizible 3. Too much noise: data appears to be completely random
###Code
def raster(event_times_list, **kwargs):
"""
From https://scimusing.wordpress.com/2013/05/06/making-raster-plots-in-python-with-matplotlib/"""
ax = plt.gca()
for ith, trial in enumerate(event_times_list):
plt.vlines(trial, ith + .5, ith + 1.5, **kwargs)
plt.ylim(.5, len(event_times_list) + .5)
return ax
##### Experiment with the values ####
######################################
noise_levels = {0.0:[], 0.1:[], 0.2:[]}
#######################################
C = 0.1
dt = 0.001
V_init = -70.0
Vth= -55.0
for iter_noise, spikes in noise_levels.items():
for i in range(0, 10):
V = [0.0] * int(T/dt)
V[0] = V_init
I = 10.0
number_of_spikes = []
for timestep in range(1, int(T/dt)):
V[timestep] = Calculate_V_with_noise(V[timestep - 1], I, C, dt, iter_noise)
if V[timestep] > Vth:
V[timestep] = V_init
number_of_spikes.append(timestep)
spikes.append(number_of_spikes)
for k in noise_levels.keys():
fig = plt.figure()
ax = raster(noise_levels[k])
plt.title('Noise level {} spikes'.format(k))
plt.xlabel('Time (ms)')
plt.ylabel('Trial')
plt.show()
###Output
_____no_output_____
###Markdown
[Descripe what you see] *** Exercise 3: Integrate and fire advanced (1.5)In this exercise we will play with somewhat more realistic versions of integrate-and-fire model.In first stage we add a leak current to the neuron. This current is caused by the ion pumps and it drives the membrane potential slowly towards its resting state. In the previous exercise there was no such current - when the input current stops, the potential remains fixed at the current value. With leak current added, the $Vm$ now slowly returns to its resting state at $-70 mV$.Another difference is that beforehand any current, no matter how small, would eventually drive the neuron to spike. In the current exercise however, weak inputs are balanced out by the leak current.Below is the basic code, look at it and understand it for a better grasp of the concept.
###Code
# Duration of the whole simulation in sec
T = 4
dt = 0.001
# the current we inject into the cell; it only lasts 3 of the 4 seconds (see the if clause inside the loop below)
I_const = 3.0
C = 0.1
R = 5
V_reset = -75.0
V_init = -70.0
V_tresh = -55.0
def V_leaky_timestep(V_prev, V_init, R, C, dt):
return V_prev + dt * (V_init - V_prev)/(R * C)
def leaky_integrate(T, dt, I_const, C, R, V_reset, V_init, V_tresh):
V = [0.0] * int(T/dt)
V[0] = V_init
number_of_spikes = 0
for timestep in range(1, int(T/dt)):
timestep_voltage = V_leaky_timestep(V[timestep - 1], V_init, R, C, dt)
if timestep < 3000:
timestep_voltage += dt * (I_const/C)
if timestep_voltage >= V_tresh:
V[timestep] = V_reset
number_of_spikes += 1
else:
V[timestep] = timestep_voltage
return V, number_of_spikes
###Output
_____no_output_____
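###Markdown
Before starting the tasks, here is a quick sanity check of the leak term (a sketch added for illustration, using the parameters defined above): with no input current, `V_leaky_timestep` should relax the membrane potential exponentially back toward rest with time constant R*C.
###Code
# Leak-only relaxation: start 10 mV above rest and watch V decay toward V_init
V_test = [-60.0]
for step in range(1, 2000):
    V_test.append(V_leaky_timestep(V_test[-1], V_init, R, C, dt))
Plot_line(V_test, 'Leak-only relaxation toward rest', 'time [ms]', 'Vm(t) [mV]')
###Output
_____no_output_____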
###Markdown
**Your task is to:**1. find the lowest $I_{injected}$ or *I_const* in the code (with 0.1 precision) that leads to a spike. Plot the behaviour at this **I** value and at the value 0.1 below (last one with no spike).
###Code
lowest_spike_current = None
voltages = {}
#Find the lowest possible current that would lead to at east one spike
#You need to change only this parameter
##### YOUR CODE STARTS HERE ####
I_const= 1.0
##### YOUR CODE ENDS HERE #####
V, spike_count = leaky_integrate(T, dt, I_const, C, R, V_reset, V_init, V_tresh)
print(spike_count)
Plot_line(V,'Membrane potential vs time','time [ms]','Vm(t) [mV]')
# We lower the current by 0.1 to show the last value with no spike
I_const= I_const-0.1
V, spike_count = leaky_integrate(T, dt, I_const, C, R, V_reset, V_init, V_tresh)
print(spike_count)
Plot_line(V,'Membrane potential vs time','time [ms]','Vm(t) [mV]')
###Output
_____no_output_____
###Markdown
[Describe what happens. Describe also what happens after the stimulus is turned off.] 2. How does firing rate (spikes per second) differ at I = 5.0, 10.0, 25.0, 50.0 with and without (results in TODO 7 in previous exercise) leak current?
###Code
################################
##### YOUR CODE STARTS HERE ####
##### YOUR CODE ENDS HERE #####
################################
###Output
_____no_output_____
###Markdown
**In the second stage** we explore the neuron's behaviour if it receives input from incoming (*presynaptic*) spikes rather than constant injected current. Even though one incoming signal is not enough to activate our neuron, *temporal summation* of inputs happening in a short period of time can lead to the voltage reaching the threshold. The code you need to understand and modify is provided below.
###Code
def synapse_current(eff, spike_strength = 70.0):
"""
spike_strength:this will modify how strong the effect of spikes are
"""
# Constant parameters
#duration of simulation in sec
T = 0.6
dt = 0.001
#we are looking into synapses, so no injected current
I_const = 0.0
#Capacity
C = 0.1
#Resistance
R = 1
#reset potential in mV
V_reset = -75.0
#resting potential in mV
V_init = -70.0
#the neuron threshold
V_tresh = -55.0
tc = -100000
#this defines the shape of the spike (how long the tail is)
taus = 5
V = [0.0] * int(T/dt)
V[0] = V_init
I_synapse = [0.0] * int(T/dt)
I_synapse[0] = 0
for timestep in range(1, int(T/dt)):
seps = 0
#The following has to do with shape of incoming current caused by spikes
for f in eff:
eps = 0
if tc <= f and f <= timestep:
eps = np.exp(-(timestep - f)/(R*C)) - np.exp(-(timestep - f)/taus)
if f < tc and tc <= timestep:
eps = np.exp(-(tc - f)/taus)*(np.exp(-(timestep - tc)/(R*C)) - np.exp(-(timestep - tc)/taus))
seps += eps
#end of spike current calculation
I_synapse[timestep] = spike_strength * 1/(1-taus/(R*C))*seps
V_prev = V[timestep - 1]
if V_prev >= V_tresh:
tc = timestep
V[timestep] = V_reset
else:
V[timestep] = V_prev + dt*(V_init - V_prev)/(R*C) + (dt*I_const/C) + spike_strength * 1/(1-taus/(R*C))*seps
return V, I_synapse
###Output
_____no_output_____
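###Markdown
The synaptic current above follows a difference-of-exponentials kernel: for a spike arriving at time f, eps(t) = 1/(1-taus/(R*C)) * (exp(-(t-f)/(R*C)) - exp(-(t-f)/taus)). Below is a minimal sketch of its shape (added for illustration, using the same parameter values that are hard-coded inside `synapse_current`).
###Code
# Shape of the synaptic kernel for a single spike arriving at t = 0
R_k, C_k, taus_k = 1, 0.1, 5            # values matching those inside synapse_current
ts = np.arange(0, 60)
eps = 1/(1 - taus_k/(R_k*C_k)) * (np.exp(-ts/(R_k*C_k)) - np.exp(-ts/taus_k))
plt.plot(ts, eps)
plt.xlabel('time since spike [timesteps]')
plt.ylabel('eps(t)')
plt.show()
###Output
_____no_output_____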
###Markdown
For this task you have to remember that if a neuron gets enough excitatory input in a short period of time, it will activate. The same amount of inputs dispersed over a longer period will not lead to firing. Your task is again to modify code near the *YOUR CODE STARTS HERE* blocks and to answer some questions:1. The neuron only receives one incoming spike at t=300ms. Plot the membrane potential and the strength of the synaptic current in time.
###Code
eff = [300]
V, I = synapse_current(eff, 70.0)
Plot_line(I,'Strength of the synaptic current','Time(m)','Current')
Plot_line(V,'Membrane potential','Time(m)','Voltage')
###Output
_____no_output_____
###Markdown
- Describe how membrane potential and strength of current change over time (peak values, shape). [Your Answer] - How long does it take for the current to decrease by half (half-life)? How is that related to the "taus" parameter?
###Code
################################
##### YOUR CODE STARTS HERE ####
##### YOUR CODE ENDS HERE #####
################################
###Output
_____no_output_____
###Markdown
[Your Answer] - Modify the strength of the synaptic current up to a point where one presynaptic spike is enough to produce a postsynaptic spike.
###Code
################################
##### YOUR CODE STARTS HERE ####
##### YOUR CODE ENDS HERE #####
################################
Plot_line(V,'Membrane potential','Time(m)','Voltage')
###Output
_____no_output_____
###Markdown
2. TODO 2: the neuron will receive a spike every 50ms. Nevertheless it never fires. Why?
###Code
eff_2 = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550]
V, I = synapse_current(eff_2)
Plot_line(V,'Membrane potential','Time(m)','Voltage')
###Output
_____no_output_____
###Markdown
[Your Answer] 3. TODO 3: Replace **t3** (time of third spike) with a number bigger than 101. What is the lowest value for t3 so that the receiving neuron does not emit a spike?
###Code
################################
##### YOUR CODE STARTS HERE ####
eff_3 = [100, 101, t3]
##### YOUR CODE ENDS HERE #####
################################
V, I = synapse_current(eff_3)
Plot_line(V,'Membrane potential','Time(m)','Voltage')
###Output
_____no_output_____
###Markdown
*** Exercise 4: Hodgkin-Huxley neuron model (1.5) The Hodgkin-Huxley model is considered to be the most important computational neuronal model in neuroscience today. We have already implemented the model. Follow the instructions and report all thoughts, interpretations and conclusions you arrive at during the work.
###Code
# Hodgkin-Huxley equations.
# See Gerstner and Kistler, Spiking Neuron Models, 2002, Section 2.2.
# You'll see I've scaled the voltage by 65 in the equation that updates V
# and the auxiliary functions. Hodgkin and Huxley set the resting voltage
# of the neuron to 0 mV and we've set it here to -65 mV (the value accepted
# today).
# INPUTS
# I0 = input current.
# T0 = total time to simulate (in [ms]).
#
# OUTPUTS
# V = the voltage of neuron.
# m = activation variable for Na-current.
# h = inactivation variable for Na-current.
# n = activation variable for K-current.
# t = the time axis of the simulation (useful for plotting)
import math
def HH0(I0, T0):
dt = 0.01
T = int(np.ceil(T0/dt))
gNa = 120
ENa = 115
gK = 36
EK = -12
gL = 0.3
ERest = 10.6
t = list(map((lambda x: x * dt), range(1, T+1)))
V = np.zeros((T, 1))
m = np.zeros((T, 1))
h = np.zeros((T, 1))
n = np.zeros((T, 1))
V[0] = -70
m[0] = 0.05
h[0] = 0.54
n[0] = 0.34
for i in range(T-1):
V[i+1] = V[i] + dt * (gNa * m[i] ** 3 * h[i] * (ENa - (V[i] + 65))
+ gK * n[i] ** 4 * (EK - (V[i] + 65)) + gL * (ERest - (V[i] + 65)) + I0)
m[i+1] = m[i] + dt * (alphaM(V[i]) * (1 - m[i]) - betaM(V[i]) * m[i])
h[i+1] = h[i] + dt * (alphaH(V[i]) * (1 - h[i]) - betaH(V[i]) * h[i])
n[i+1] = n[i] + dt * (alphaN(V[i]) * (1 - n[i]) - betaN(V[i]) * n[i])
return V, m, h, n, t
def alphaM(V):
aM = (2.5 - 0.1 * (V + 65)) / (math.exp(2.5 - 0.1 * (V + 65)) - 1)
return aM
def betaM(V):
bM = 4 * math.exp(-(V + 65) / 18)
return bM
def alphaH(V):
aH = 0.07 * math.exp(-(V + 65) / 20)
return aH
def betaH(V):
bH = 1. / (math.exp(3.0 - 0.1 * (V + 65)) + 1)
return bH
def alphaN(V):
aN = (0.1 - 0.01 * (V + 65)) / (math.exp(1 - 0.1 * (V + 65)) - 1)
return aN
def betaN(V):
bN = 0.125 * math.exp(-(V + 65) / 80)
return bN
###Output
_____no_output_____
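###Markdown
Before answering, it may help to visualize the steady-state values of the gating variables, e.g. m_inf(V) = alphaM(V)/(alphaM(V)+betaM(V)) and likewise for h and n (a sketch added for illustration, using the rate functions defined above):
###Code
# Steady-state activation/inactivation curves of the HH gating variables
Vs = np.linspace(-100, 20, 200)
m_inf = [alphaM(v)/(alphaM(v) + betaM(v)) for v in Vs]
h_inf = [alphaH(v)/(alphaH(v) + betaH(v)) for v in Vs]
n_inf = [alphaN(v)/(alphaN(v) + betaN(v)) for v in Vs]
plt.plot(Vs, m_inf, label='m (Na+ activation)')
plt.plot(Vs, h_inf, label='h (Na+ inactivation)')
plt.plot(Vs, n_inf, label='n (K+ activation)')
plt.xlabel('Membrane potential [mV]')
plt.ylabel('Steady-state value')
plt.legend()
plt.show()
###Output
_____no_output_____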
###Markdown
1) Explain the nature of m, h and n in terms of Na+ and K+ ion channels being open or closed (discussed in lecture). Your answer 2) Plot the voltage respect to time for I0=5.0. Modify I0 and report the lowest I0 that still causes a spike in the beginning of simulation. Also plot the lowest I0 that causes continuous spiking till the end of simulation. (Precision of your answer should be at least 0.1)
###Code
I0 = 5;
T0 = 400; #duration in ms
################################
##### YOUR CODE STARTS HERE ####
##### YOUR CODE ENDS HERE #####
################################
###Output
_____no_output_____
###Markdown
Your answer 3) Count the number of spikes at different I0 values (code provided). **Note that** this series of simulations can take up to 5-10 min, do not panic if it takes time. Describe what you see. Why this *input-output curve* is more biologically realistic than the input-output relation of IAF? How is the non-linearity of the relationship related to modelling the ion channels and not simply resetting the membrane potential instantaneously as soon as threshold is reached?
###Code
T0 = 500
currents = [3, 4, 5, 6, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7.0, 8, 9, 10, 11, 12, 13, 14, 15]
spikes = np.zeros(len(currents))
for i in range(len(currents)):
    I0 = currents[i]
    V, m, h, n, t = HH0(I0, T0)
    count = 0  # count spikes
    for j in range(len(V)):
        if V[j] > 0:
            count += 1
            V[j:j+300] = -70.0  # blank the next 300 samples (3 ms) so each spike is counted once
    spikes[i] = count
plt.plot(currents, spikes)
plt.xlabel('Current')
plt.ylabel('Number of spikes')
plt.show()
###Output
_____no_output_____ |
modelproject/ModelProject4.ipynb | ###Markdown
Model Project *** _In this model project we will present a simple Robinson Crusoe production economy. We will solve the model analytically using sympy, evaluate the markets at different parameterizations of price and wage, and visualize one solution._ The theoretical model: Imagine that Crusoe is schizophrenic and makes his decisions as a manager and as a consumer separately. His decisions are, however, guided by market prices: the labor wage and the consumption price. It is assumed that Crusoe is endowed with a total time endowment of 60 hours per week. **Producer problem:** When Crusoe acts as a manager he seeks to maximize his profit subject to the production function while taking the price and wage as given. \\[ \max_{x,l} px-wl \\]subject to\\[ x=f(l)=Al^\beta \\] where p is the market price of the good, w is the wage, x is the good and l is labor. A and $\beta$ reflect technology and returns to scale, respectively. **Consumer problem:** When acting as a consumer, Crusoe maximizes his utility of the consumption good x and leisure (the latter is defined as what's left of the total time endowment when working l hours). As the consumer and owner of the firm, Crusoe receives both the wage from "selling" his labor and the profit from the producer problem. \\[ \max_{x,l} u(x,(L-l))=x^\alpha(L-l)^{(1-\alpha)} \\]subject to\\[ px=wl+\pi(w,p) \\] When solving the model, we need to derive demand and supply expressions for labor and the consumption good. When supply and demand are equalized in one of the markets, it follows from Walras' law that the other market will also reach an equilibrium. Hence, the model is solved by first optimizing in the markets separately, deriving the supply and demand expressions, then equalizing supply and demand across markets and solving for the consumption price and labor wage. The analytical solution: Install packages.
###Code
import numpy as np
import sympy as sm
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
In the following, we will primarily analyze the model using sympy. However, due to computational problems when solving the model using sympy with algebraic expressions only (see https://docs.sympy.org/0.7.6/tutorial/solvers.html), we define some of the parameter values as follows:
###Code
A = 13.15
beta = 0.5
alpha = 2/3
L = 60
I = 10
###Output
_____no_output_____
###Markdown
Producer problem:The producer problem is quite simple and the easiest way to solve the maximization problem is to substitute the constraint (production function) into the profit function, deriving the reduced form.
###Code
# Define the sympy symbols for price, wage, the good and labor
p, w, x, l = sm.symbols('p w x l', positive=True)
# Profit function
def prof(x,l):
    return p*x-w*l
# Production function
def prod(l):
    return A*l**beta
# Reduced form - substituting the production function into the profit function
def reduced(l):
    return prof(prod(l),l)
###Output
_____no_output_____
###Markdown
Substituting for x and maximizing w.r.t labor (l):
###Code
# Optimization using sympy diff:
focProd = sm.diff(reduced(l),l)
# Isolating labor and thus, deriving labor demand:
laborDemand = sm.solve(focProd,l)
# Finding the supply of goods:
profSubs = prof(x,laborDemand[0])
goodSupply = sm.solve(profSubs,x)
# Printing labor demand and goods supply
print("Labor demand: lD=", laborDemand)
print("supply of goods: xS=", goodSupply)
###Output
Labor demand: lD= [43.230625*p**2/w**2]
supply of goods: xS= [43.230625*p/w]
###Markdown
Profit, as a function of w and p, is derived by inserting labor demand and goods supply into the profit function:
###Code
Profit = (1-beta)*(A*p)**(1/(1-beta))*(beta/w)**(beta/(1-beta))
print(Profit)
###Output
43.230625*p**2.0*(1/w)**1.0
###Markdown
Consumer problem:The consumer problem is not as simple as the producer problem, so we solve it using sympy's Lagrangian machinery.
###Code
# The variables to maximize w.r.t.
x, l = sm.var('x,l',real=True)
# The objective function
util = x**alpha*(L-l)**(1-alpha)
# Budget constraint
bud = p*x-w*l-Profit
# Specifying the shadow price
lam = sm.symbols('lambda',real = True)
# Setting up the Lagrangian (named lagrangian so it does not overwrite L, the time endowment)
lagrangian = util-lam*bud
# Differentiating w.r.t x and l
gradL = [sm.diff(lagrangian,c) for c in [x,l]]
# The focs and the shadow price
KKT_eqs = gradL + [bud]
KKT_eqs
# Showing the stationary points solving the constrained problem
stationary_points = sm.solve(KKT_eqs,[x,l,lam],dict=True)
stationary_points
###Output
_____no_output_____
###Markdown
We note that all proposed solutions of l and x are the same. The only variation between the three proposed solutions is the shadow price. Hence, we proceed with the solutions of l and x and derive the optimal wage. This is done by equalizing supply and demand of labor, hence, obtaining equilibrium in one market.
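As a hand check on the hard-coded labor-supply expression in the next cell: a Cobb-Douglas consumer spends the share $1-\alpha$ of full income $wL+\pi$ on leisure, so \\[ l^{S}=L-\frac{(1-\alpha)(wL+\pi)}{w}=\alpha L-\frac{(1-\alpha)\pi}{w}=40-\frac{14.4102\,p^{2}}{w^{2}} \\] using $\alpha=2/3$, $L=60$ and $\pi=43.230625\,p^{2}/w$.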
###Code
# Equalize labor supply (from the Lagrangian solution above) and labor demand
equalLab = sm.Eq(-14.4102083333333*p**2/w**2+40,43.230625*p**2/w**2)
opt_wag = sm.solve(equalLab,w)
print("Optimal wage depending on price", opt_wag)
###Output
Optimal wage depending on price [-1.20042527186549*p, 1.20042527186549*p]
###Markdown
Since p > 0 and the wage must be positive, we keep the root $w\approx1.2p$. This means that any price level implies a wage roughly 1.2 times higher, with equilibrium in both markets. In the following, we evaluate the optimal wage expression and labor demand at different values of p and, hence, w.
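(Hand check: the market-clearing condition gives $40=(14.4102+43.2306)\,p^{2}/w^{2}$, so $w=\sqrt{57.6408/40}\,p\approx1.2004\,p$, matching the printed root.)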
###Code
# We convert the symbolic optimal wage expression a function depending of p
_opt_wag = sm.lambdify(p,opt_wag[1])
# We evaluate the wage in a price of 10 and 1, respectively.
p1_vec = np.array([10,1])
wages = _opt_wag(p1_vec)
print("Optimal wage, when price is 10 and 1, respectively: ",wages)
# We evaluate the labor demand in the prices and wages. First making the labor demand
# expression a function depending on p and w.
_lab_dem = sm.lambdify((p,w),laborDemand)
p1_vec = np.array([10,1])
# Labor demand evaluated in price and wages.
labor_dem1 = [_lab_dem(p1_vec[0],wages[0]), _lab_dem(p1_vec[1],wages[1])]
print("Labor demand evaluated in combination of wages and prices: ", labor_dem1)
# Labor supply from lagrange optimization problem
_lab_sup = [-14.4102083333333*p1_vec[0]**2/wages[0]**2+40,-14.4102083333333*p1_vec[1]**2/wages[1]**2+40]
print("Labor supply evaluated in combination of wages and prices: ", _lab_sup)
# Profit in different combination of wages and prices
Profit_eval = 43.230625*p1_vec**2.0*(1/wages)**1.0
print("Profit evaluated in combination of prices and wages:", [Profit_eval[0], Profit_eval[1]])
# Demand of consumption good
good_opt = prod(30)
print("Demand and supply of consumption good: ", good_opt)
# Utility in different combination of wages and prices
_utility = sm.lambdify((x,l),util)
print("Utility evaluated in combination of prices and wages:", _utility(good_opt,30))
###Output
Optimal wage, when price is 10 and 1, respectively: [12.00425272 1.20042527]
Labor demand evaluated in combination of wages and prices: [[29.999999999999954], [29.999999999999957]]
Labor supply evaluated in combination of wages and prices: [30.00000000000004, 30.000000000000036]
Profit evaluated in combination of prices and wages: [360.12758155964644, 36.01275815596465]
Demand and supply of consumption good: 72.02551631192935
Utility evaluated in combination of prices and wages: 53.78956164406131
###Markdown
Visualization: We now visualize the solution when the price is 1 and the wage is 1.2.
###Code
def prof_ny(x,l,profit):
return 1*x-1.2*l+profit
def util_ny(l, profit):
return (profit/((L-l)**(1-alpha)))**(1/alpha)
def budget_ny(x,l,profit):
return x-1.2*l+profit
fig = plt.figure(figsize=(8,4),dpi=100)
labor_vec = np.linspace(0,59,500)
goods_vec = np.linspace(0,140,500)
ax_left = fig.add_subplot(1,2,1)
ax_left.plot(labor_vec, prod(labor_vec))
ax_left.plot(labor_vec, prof_ny(goods_vec,labor_vec,0))
ax_left.plot(labor_vec, prof_ny(goods_vec,labor_vec,Profit_eval[1]))
ax_left.set_title('Price-taking producer')
ax_left.set_xlabel('l')
ax_left.set_ylabel('x')
ax_right = fig.add_subplot(1,2,2)
ax_right.plot(labor_vec, util_ny(labor_vec,54))
ax_right.plot(labor_vec, budget_ny(goods_vec,labor_vec,Profit_eval[1]))
ax_right.set_title('Price-taking consumer')
ax_right.set_xlabel('l')
ax_right.set_ylabel('x')
###Output
_____no_output_____ |
Python Data Science Toolbox -Part 1/Default arguments- variable-length arguments and scope/06.Functions with multiple default arguments.ipynb | ###Markdown
You've now defined a function that uses a default argument - don't stop there just yet! You will now try your hand at defining a function with more than one default argument and then calling this function in various ways.After defining the function, you will call it by supplying values to all the default arguments of the function. Additionally, you will call the function by not passing a value to one of the default arguments - see how that changes the output of your function! Complete the function header with the function name shout_echo. It accepts an argument word1, a default argument echo with default value 1 and a default argument intense with default value False, in that order. In the body of the if statement, make the string object echo_word upper case by applying the method .upper() on it. Call shout_echo() with the string, "Hey", the value 5 for echo and the value True for intense. Assign the result to with_big_echo. Call shout_echo() with the string "Hey" and the value True for intense. Assign the result to big_no_echo.
###Code
# Define shout_echo
def shout_echo(word1, echo=1, intense=False):  # intense defaults to False
"""Concatenate echo copies of word1 and three
exclamation marks at the end of the string."""
# Concatenate echo copies of word1 using *: echo_word
echo_word = word1 * echo
# Make echo_word uppercase if intense is True
if intense is True:
# Make uppercase and concatenate '!!!': echo_word_new
echo_word_new = word1.upper()*echo + '!!!'
else:
# Concatenate '!!!' to echo_word: echo_word_new
echo_word_new = echo_word + '!!!'
# Return echo_word_new
return echo_word_new
# Call shout_echo() with "Hey", echo=5 and intense=True: with_big_echo
with_big_echo = shout_echo("Hey", 5, intense=True)
# Call shout_echo() with "Hey" and intense=True: big_no_echo
big_no_echo = shout_echo("Hey", intense=True)
# Print values
print(with_big_echo)
print(big_no_echo)
###Output
HEYHEYHEYHEYHEY!!!
HEY!!!
|
wandb/run-20210518_110409-1r6cfx2e/tmp/code/main.ipynb | ###Markdown
WorkFlow Classes Load the data Test Modelling Modelling **** Classes
###Code
NAME = "change the conv2d"
BATCH_SIZE = 32
import os
import cv2
import torch
import numpy as np
def load_data(img_size=112):
data = []
index = -1
labels = {}
for directory in os.listdir('./data/'):
index += 1
labels[f'./data/{directory}/'] = [index,-1]
print(len(labels))
for label in labels:
for file in os.listdir(label):
filepath = label + file
img = cv2.imread(filepath,cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img,(img_size,img_size))
img = img / 255.0
data.append([
np.array(img),
labels[label][0]
])
labels[label][1] += 1
for _ in range(12):
np.random.shuffle(data)
print(len(data))
np.save('./data.npy',data)
return data
import torch
def other_loading_data_proccess(data):
X = []
y = []
print('going through the data..')
for d in data:
X.append(d[0])
y.append(d[1])
print('splitting the data')
VAL_SPLIT = 0.25
VAL_SPLIT = len(X)*VAL_SPLIT
VAL_SPLIT = int(VAL_SPLIT)
X_train = X[:-VAL_SPLIT]
y_train = y[:-VAL_SPLIT]
X_test = X[-VAL_SPLIT:]
y_test = y[-VAL_SPLIT:]
print('turning data to tensors')
X_train = torch.from_numpy(np.array(X_train))
y_train = torch.from_numpy(np.array(y_train))
X_test = torch.from_numpy(np.array(X_test))
y_test = torch.from_numpy(np.array(y_test))
return [X_train,X_test,y_train,y_test]
###Output
_____no_output_____
###Markdown
**** Load the data
###Code
REBUILD_DATA = True
if REBUILD_DATA:
    data = load_data()
else:
    data = np.load('./data.npy', allow_pickle=True)  # reuse the cached dataset
np.random.shuffle(data)
X_train,X_test,y_train,y_test = other_loading_data_proccess(data)
###Output
36
2515
###Markdown
Test Modelling
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
# class Test_Model(nn.Module):
# def __init__(self):
# super().__init__()
# self.conv1 = nn.Conv2d(1, 6, 5)
# self.pool = nn.MaxPool2d(2, 2)
# self.conv2 = nn.Conv2d(6, 16, 5)
# self.fc1 = nn.Linear(16 * 25 * 25, 120)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84, 36)
# def forward(self, x):
# x = self.pool(F.relu(self.conv1(x)))
# x = self.pool(F.relu(self.conv2(x)))
# x = x.view(-1, 16 * 25 * 25)
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
# return x
class Test_Model(nn.Module):
def __init__(self):
super().__init__()
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv3 = nn.Conv2d(32,64,5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.fc1 = nn.Linear(128 * 10 * 10, 512)
self.fc2 = nn.Linear(512, 256)
self.fc4 = nn.Linear(256,128)
self.fc3 = nn.Linear(128, 36)
def forward(self, x,shape=False):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv2(x)))
if shape:
print(x.shape)
x = x.view(-1, 128 * 10 * 10)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc4(x))
x = self.fc3(x)
return x
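# Shape check (worked out by hand): 112 -> conv5 -> 108 -> pool2 -> 54
#   -> conv5 -> 50 -> pool2 -> 25 -> conv5 -> 21 -> pool2 -> 10,
# so the flattened feature size fed to fc1 is 128 * 10 * 10.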
device = torch.device('cuda')
model = Test_Model().to(device)
preds = model(X_test.reshape(-1,1,112,112).float().to(device),True)
preds[0]
optimizer = torch.optim.SGD(model.parameters(),lr=0.1)
criterion = nn.CrossEntropyLoss()
EPOCHS = 5
loss_logs = []
from tqdm import tqdm
PROJECT_NAME = "Sign-Language-Recognition"
def test(net,X,y):
    correct = 0
    total = 0
    net.eval()
    with torch.no_grad():
        for i in range(len(X)):
            real_class = y[i].to(device)  # labels are class indices, not one-hot
            net_out = net(X[i].view(-1,1,112,112).to(device).float())
            net_out = net_out[0]
            predicted_class = torch.argmax(net_out)
            if predicted_class == real_class:
                correct += 1
            total += 1
    return round(correct/total,3)
import wandb
len(os.listdir('./data/'))
import random
# index = random.randint(0,29)
# print(index)
# wandb.init(project=PROJECT_NAME,name=NAME)
# for _ in tqdm(range(EPOCHS)):
# for i in range(0,len(X_train),BATCH_SIZE):
# X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
# y_batch = y_train[i:i+BATCH_SIZE].to(device)
# model.to(device)
# preds = model(X_batch.float())
# loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index])})
# wandb.finish()
import matplotlib.pyplot as plt
import pandas as pd
df = pd.Series(loss_logs)
df.plot.line(figsize=(12,6))
test(model,X_test,y_test)
test(model,X_train,y_train)
preds
X_testing = X_train
y_testing = y_train
correct = 0
total = 0
model.eval()
with torch.no_grad():
    for i in range(len(X_testing)):
        real_class = y_testing[i].to(device)  # labels are class indices
        net_out = model(X_testing[i].view(-1,1,112,112).to(device).float())
        net_out = net_out[0]
        predicted_class = torch.argmax(net_out)
        # print(predicted_class)
        if predicted_class == real_class:
            correct += 1
        total += 1
print(round(correct/total,3))
# for real,pred in zip(y_batch,preds):
# print(real)
# print(torch.argmax(pred))
# print('\n')
###Output
_____no_output_____
###Markdown
Modelling
###Code
# conv2d_output
# conv2d_1_ouput
# conv2d_2_ouput
# output_fc1
# output_fc2
# output_fc4
# max_pool2d_keranl
# max_pool2d
# num_of_linear
# activation
# best num of epochs
# best optimizer
# best loss
## best lr
class Test_Model(nn.Module):
def __init__(self,conv2d_output=128,conv2d_1_ouput=32,conv2d_2_ouput=64,output_fc1=512,output_fc2=256,output_fc4=128,output=36,activation=F.relu,max_pool2d_keranl=2):
super().__init__()
print(conv2d_output)
print(conv2d_1_ouput)
print(conv2d_2_ouput)
print(output_fc1)
print(output_fc2)
print(output_fc4)
print(activation)
self.conv2d_output = conv2d_output
self.pool = nn.MaxPool2d(max_pool2d_keranl)
self.conv1 = nn.Conv2d(1, conv2d_1_ouput, 5)
self.conv3 = nn.Conv2d(conv2d_1_ouput,conv2d_2_ouput,5)
self.conv2 = nn.Conv2d(conv2d_2_ouput, conv2d_output, 5)
self.fc1 = nn.Linear(conv2d_output * 10 * 10, output_fc1)
self.fc2 = nn.Linear(output_fc1, output_fc2)
self.fc4 = nn.Linear(output_fc2,output_fc4)
self.fc3 = nn.Linear(output_fc4, output)
self.activation = activation
def forward(self, x,shape=False):
x = self.pool(self.activation(self.conv1(x)))
x = self.pool(self.activation(self.conv3(x)))
x = self.pool(self.activation(self.conv2(x)))
if shape:
print(x.shape)
x = x.view(-1, self.conv2d_output * 10 * 10)
x = self.activation(self.fc1(x))
x = self.activation(self.fc2(x))
x = self.activation(self.fc4(x))
x = self.fc3(x)
return x
# conv2d_output
# conv2d_1_ouput
# conv2d_2_ouput
# output_fc1
# output_fc2
# output_fc4
# max_pool2d_keranl
# max_pool2d
# num_of_linear
# best num of epochs
# best loss
## best lr
# batch size
EPOCHS = 3
BATCH_SIZE = 32
# conv2d_output
# conv2d_1_ouput
# conv2d_2_ouput
# output_fc1
# output_fc2
# output_fc4
# max_pool2d_keranl
# max_pool2d
# num_of_linear
# activation =
# best num of epochs
# best optimizer =
# best loss
## best lr
def get_loss(criterion,y,model,X):
    # Validation loss only - no gradient step should happen here
    with torch.no_grad():
        preds = model(X.view(-1,1,112,112).to(device).float())
        loss = criterion(preds, y.to(device).long())
    return loss.item()
optimizers = [torch.optim.SGD,torch.optim.Adadelta,torch.optim.Adagrad,torch.optim.Adam,torch.optim.AdamW,torch.optim.SparseAdam,torch.optim.Adamax]
for optimizer_class in optimizers:
    # note: SparseAdam expects sparse gradients and will raise on this dense model
    model = Test_Model(activation=nn.SELU()).to(device)
    optimizer = optimizer_class(model.parameters(),lr=0.1)
    criterion = nn.CrossEntropyLoss()
    index = random.randint(0,29)
    wandb.init(project=PROJECT_NAME,name=f'optimizer-{optimizer_class.__name__}')
    for _ in tqdm(range(EPOCHS)):
        for i in range(0,len(X_train),BATCH_SIZE):
            X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
            y_batch = y_train[i:i+BATCH_SIZE].to(device)
            preds = model(X_batch.float())
            loss = criterion(preds,y_batch.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':y_batch[index],'val_loss':get_loss(criterion,y_test,model,X_test)})
    print(f'{torch.argmax(preds[index])} \n {y_batch[index]}')
    print(f'{torch.argmax(preds[1])} \n {y_batch[1]}')
    print(f'{torch.argmax(preds[2])} \n {y_batch[2]}')
    print(f'{torch.argmax(preds[3])} \n {y_batch[3]}')
    print(f'{torch.argmax(preds[4])} \n {y_batch[4]}')
    wandb.finish()
# activations = [nn.ELU(),nn.LeakyReLU(),nn.PReLU(),nn.ReLU(),nn.ReLU6(),nn.RReLU(),nn.SELU(),nn.CELU(),nn.GELU(),nn.SiLU(),nn.Tanh()]
# for activation in activations:
# model = Test_Model(activation=activation)
# optimizer = torch.optim.SGD(model.parameters(),lr=0.1)
# criterion = nn.CrossEntropyLoss()
# index = random.randint(0,29)
# print(index)
# wandb.init(project=PROJECT_NAME,name=f'activation-{activation}')
# for _ in tqdm(range(EPOCHS)):
# for i in range(0,len(X_train),BATCH_SIZE):
# X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
# y_batch = y_train[i:i+BATCH_SIZE].to(device)
# model.to(device)
# preds = model(X_batch.float())
# loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index]),'val_loss':get_loss(criterion,y_test,model,X_test)})
# print(f'{torch.argmax(preds[index])} \n {y_batch[index]}')
# print(f'{torch.argmax(preds[1])} \n {y_batch[1]}')
# print(f'{torch.argmax(preds[2])} \n {y_batch[2]}')
# print(f'{torch.argmax(preds[3])} \n {y_batch[3]}')
# print(f'{torch.argmax(preds[4])} \n {y_batch[4]}')
# wandb.finish()
for real,pred in zip(y_batch,preds):
print(real)
print(torch.argmax(pred))
print('\n')
###Output
_____no_output_____ |
notebook/plotting.ipynb | ###Markdown
Polar plotting of wave spectra using xarray api
###Code
import os
import matplotlib.pyplot as plt
from wavespectra import read_swan
WAVESPECTRA_PATH = "/source/wavespectra"
dset = read_swan(
    os.path.join(WAVESPECTRA_PATH, "tests/sample_files/swanfile.spec"),
    as_site=True
)
darray = dset.isel(site=0).efth.sortby("dir")
###Output
_____no_output_____
###Markdown
Simplest usageThe `SpecArray` accessor provides an API similar to `DataArray`'s for plotting a polar 2D spectrum with some predefined defaults.
###Code
darray.isel(time=0).spec.plot.contourf()
###Output
_____no_output_____
###Markdown
Spectra can be plotted in the period space
###Code
darray.isel(time=0).spec.plot.contourf(as_period=True)
###Output
_____no_output_____
###Markdown
By default the log10(efth) is plotted but the real values could be plotted instead:
###Code
darray.isel(time=0).spec.plot.contourf(as_period=True, as_log10=False)
###Output
_____no_output_____
###Markdown
Radius extent can be controlled either by slicing frequencies or by setting axis property:
###Code
# Using DataArray.sel, frequencies will be contained within the slicing limits
darray.isel(time=0).sel(freq=slice(0.0, 0.2)).spec.plot.contourf()
# Using SpecArray.split, frequencies are interpolated at the slicing limits except for fmin which is < min(freq)
plt.figure()
darray.isel(time=0).spec.split(fmin=0, fmax=0.2).spec.plot.contourf()
# Using axis properties
plt.figure()
darray.isel(time=0).spec.plot.contourf()
ax = plt.gca()
ax.set_rmin(0)
ax.set_rmax(0.2)
###Output
_____no_output_____
###Markdown
Using xarray / matplotlib properties
###Code
darray.isel(time=0).spec.plot.contourf(
cmap="viridis",
vmin=-5,
vmax=-2,
levels=15,
add_colorbar=False,
)
###Output
_____no_output_____
###Markdown
Multiple spectra
###Code
darray.spec.plot.contourf(col="time", col_wrap=3, levels=15, figsize=(15,8), vmax=-1)
###Output
_____no_output_____
###Markdown
Clean axes, useful when plotting many small axes for an overview
###Code
darray.sel(freq=slice(0, 0.2)).spec.plot.contourf(
col="time",
col_wrap=3,
levels=15,
figsize=(15,8),
vmax=-1,
clean_radius=True,
clean_sector=True
)
###Output
_____no_output_____ |
examples/notebooks/train.ipynb | ###Markdown
Guild AI Get Started Mock Training ScriptThis is a Notebook implementation of the mock training script used in [Guild AI Quick Start](https://my.guild.ai/start). Contents This Notebook is designed to run as an experiment using `guild run train.ipynb`. For an interactive Notebook session using `guild.ipy` see [get-started.ipynb](get-started.ipynb). Mock training functionCreate a mock training script. This function doesn’t actually train anything, but simulates the training process of accepting hyperparameters as inputs and generating a loss.
###Code
import numpy as np
def train(x, noise=0.1):
loss = (np.sin(5 * x) * (1 - np.tanh(x ** 2)) + np.random.randn() * noise)
print("loss: %f" % loss)
###Output
_____no_output_____
###Markdown
[Function credit: *skopt API documentation*](https://scikit-optimize.github.io/)Based on our mock training function, the "best" result (i.e. the run with the lowest *loss*) should be the run where `x` is close to `-0.3`. Because there's a random component (i.e. the `noise` parameter) your results may show best results with different values for `x`.Below is an image that plots *loss* for values of *x*, showing the lowest loss where x is approximately `-0.3`.[Image credit: *Bayesian optimization with skopt*](https://scikit-optimize.github.io/notebooks/bayesian-optimization.html) Sample useWe define sample values for `x` and `noise` and call the training function:
###Code
x = -0.3
noise = 0.1
train(x, noise)
###Output
loss: -0.963175
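###Markdown
 A quick extra check (an illustrative sketch, not part of the original script): scan `x` over a grid with the noise term removed to confirm the loss bottoms out near `x = -0.3`.
###Code
# Noise-free version of the mock loss, evaluated over a grid of x values
xs = np.linspace(-2.0, 2.0, 81)
losses = np.sin(5 * xs) * (1 - np.tanh(xs ** 2))
print("x with the lowest noise-free loss: %.2f" % xs[int(np.argmin(losses))])
###Output
_____no_output_____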
###Markdown
Deep Learning segmentation suite designed for 2D microscopy image segmentation---The corresponding **[GitHub repository](https://github.com/esgomezm/microscopy-dl-suite-tf)**, developed by the authors of the paper. ---**Please cite the corresponding paper** Install required librariesIn case it returns "restart session" at the end of the installation, please click it and start from the beginning.
###Code
!git clone https://github.com/esgomezm/microscopy-dl-suite-tf
!pip3 install -r microscopy-dl-suite-tf/dl-suite/requirements.txt
###Output
Cloning into 'microscopy-dl-suite-tf'...
remote: Enumerating objects: 220, done.[K
remote: Counting objects: 100% (220/220), done.[K
remote: Compressing objects: 100% (189/189), done.[K
remote: Total 220 (delta 102), reused 60 (delta 29), pack-reused 0[K
Receiving objects: 100% (220/220), 4.27 MiB | 4.01 MiB/s, done.
Resolving deltas: 100% (102/102), done.
Requirement already satisfied: click in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 1)) (7.1.2)
Collecting opencv-python-headless
Downloading opencv_python_headless-4.5.4.60-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (47.6 MB)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 3)) (1.19.5)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 4)) (4.62.3)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 5)) (1.4.1)
Collecting tensorflow==2.2.0
Downloading tensorflow-2.2.0-cp37-cp37m-manylinux2010_x86_64.whl (516.2 MB)
Requirement already satisfied: scikit-image in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (0.18.3)
Collecting SimpleITK
Downloading SimpleITK-2.1.1-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (48.4 MB)
Collecting imreg_dft
Downloading imreg_dft-2.0.0.tar.gz (101 kB)
Requirement already satisfied: openpyxl in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 10)) (2.5.9)
Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 11)) (1.1.5)
Collecting plantcv
Downloading plantcv-3.13.4-py3-none-any.whl (230 kB)
Requirement already satisfied: seaborn in /usr/local/lib/python3.7/dist-packages (from -r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 13)) (0.11.2)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.42.0)
Requirement already satisfied: google-pasta>=0.1.8 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (0.2.0)
Collecting tensorflow-estimator<2.3.0,>=2.2.0
Downloading tensorflow_estimator-2.2.0-py2.py3-none-any.whl (454 kB)
Collecting tensorboard<2.3.0,>=2.2.0
Downloading tensorboard-2.2.2-py3-none-any.whl (3.0 MB)
Requirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (3.17.3)
Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.13.3)
Requirement already satisfied: astunparse==1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.6.3)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (0.12.0)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.1.0)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (0.37.0)
Collecting gast==0.3.3
Downloading gast-0.3.3-py2.py3-none-any.whl (9.7 kB)
Collecting h5py<2.11.0,>=2.10.0
Downloading h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl (2.9 MB)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.15.0)
Requirement already satisfied: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.1.2)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (3.3.0)
Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.35.0)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (57.4.0)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (2.23.0)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.8.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (0.4.6)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.0.1)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (3.3.6)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (0.2.8)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (4.2.4)
Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (4.8)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.3.0)
Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (4.8.2)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (3.6.0)
Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=4.4->markdown>=2.6.8->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (3.10.0.2)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (0.4.8)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (2.10)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (1.24.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3,>=2.21.0->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (2021.10.8)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 6)) (3.1.1)
Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (3.2.2)
Requirement already satisfied: pillow!=7.1.0,!=7.1.1,>=4.3.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (7.1.2)
Requirement already satisfied: PyWavelets>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (1.2.0)
Requirement already satisfied: imageio>=2.3.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (2.4.1)
Requirement already satisfied: tifffile>=2019.7.26 in /usr/local/lib/python3.7/dist-packages (from scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (2021.11.2)
Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (2.6.3)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (1.3.2)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (2.8.2)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (3.0.6)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 7)) (0.11.0)
Requirement already satisfied: jdcal in /usr/local/lib/python3.7/dist-packages (from openpyxl->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 10)) (1.4.1)
Requirement already satisfied: et-xmlfile in /usr/local/lib/python3.7/dist-packages (from openpyxl->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 10)) (1.1.0)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 11)) (2018.9)
Collecting dask-jobqueue
Downloading dask_jobqueue-0.7.3-py2.py3-none-any.whl (40 kB)
Requirement already satisfied: statsmodels in /usr/local/lib/python3.7/dist-packages (from plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (0.10.2)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (1.0.1)
Requirement already satisfied: opencv-python in /usr/local/lib/python3.7/dist-packages (from plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (4.1.2.30)
Requirement already satisfied: plotnine in /usr/local/lib/python3.7/dist-packages (from plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (0.6.0)
Requirement already satisfied: dask in /usr/local/lib/python3.7/dist-packages (from plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (2.12.0)
Collecting dask
Downloading dask-2021.12.0-py3-none-any.whl (1.0 MB)
Collecting distributed>=2.19
Downloading distributed-2021.12.0-py3-none-any.whl (802 kB)
Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from dask->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (3.13)
Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from dask->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (21.3)
Requirement already satisfied: cloudpickle>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from dask->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (1.3.0)
Collecting fsspec>=0.6.0
Downloading fsspec-2021.11.1-py3-none-any.whl (132 kB)
Collecting partd>=0.3.10
Downloading partd-1.2.0-py3-none-any.whl (19 kB)
Requirement already satisfied: toolz>=0.8.2 in /usr/local/lib/python3.7/dist-packages (from dask->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (0.11.2)
Collecting cloudpickle>=1.1.1
Downloading cloudpickle-2.0.0-py3-none-any.whl (25 kB)
Requirement already satisfied: psutil>=5.0 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (5.4.8)
Requirement already satisfied: tblib>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (1.7.0)
Requirement already satisfied: sortedcontainers!=2.0.0,!=2.0.1 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (2.4.0)
Requirement already satisfied: msgpack>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (1.0.3)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (2.11.3)
Requirement already satisfied: tornado>=5 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (5.1.1)
Requirement already satisfied: zict>=0.1.3 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (2.0.0)
Collecting locket
Downloading locket-0.2.1-py2.py3-none-any.whl (4.1 kB)
Requirement already satisfied: heapdict in /usr/local/lib/python3.7/dist-packages (from zict>=0.1.3->distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (1.0.1)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->distributed>=2.19->dask-jobqueue->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (2.0.1)
Requirement already satisfied: mizani>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from plotnine->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (0.6.0)
Requirement already satisfied: descartes>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from plotnine->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (1.1.0)
Requirement already satisfied: patsy>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from plotnine->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (0.5.2)
Requirement already satisfied: palettable in /usr/local/lib/python3.7/dist-packages (from mizani>=0.6.0->plotnine->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (3.3.0)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (1.1.0)
Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->plantcv->-r microscopy-dl-suite-tf/dl-suite/requirements.txt (line 12)) (3.0.0)
Building wheels for collected packages: imreg-dft
  Building wheel for imreg-dft (setup.py) ... done
Created wheel for imreg-dft: filename=imreg_dft-2.0.0-py3-none-any.whl size=47201 sha256=a35874a495a0560a89805a470aa4cd97b4044a720c3570fd06b329e8477ac582
Stored in directory: /root/.cache/pip/wheels/fe/07/aa/5294d6612a7945a8ab549be2374111c64193c1faabcadafec7
Successfully built imreg-dft
Installing collected packages: locket, partd, fsspec, cloudpickle, dask, distributed, tensorflow-estimator, tensorboard, h5py, gast, dask-jobqueue, tensorflow, SimpleITK, plantcv, opencv-python-headless, imreg-dft
Attempting uninstall: cloudpickle
Found existing installation: cloudpickle 1.3.0
Uninstalling cloudpickle-1.3.0:
Successfully uninstalled cloudpickle-1.3.0
Attempting uninstall: dask
Found existing installation: dask 2.12.0
Uninstalling dask-2.12.0:
Successfully uninstalled dask-2.12.0
Attempting uninstall: distributed
Found existing installation: distributed 1.25.3
Uninstalling distributed-1.25.3:
Successfully uninstalled distributed-1.25.3
Attempting uninstall: tensorflow-estimator
Found existing installation: tensorflow-estimator 2.7.0
Uninstalling tensorflow-estimator-2.7.0:
Successfully uninstalled tensorflow-estimator-2.7.0
Attempting uninstall: tensorboard
Found existing installation: tensorboard 2.7.0
Uninstalling tensorboard-2.7.0:
Successfully uninstalled tensorboard-2.7.0
Attempting uninstall: h5py
Found existing installation: h5py 3.1.0
Uninstalling h5py-3.1.0:
Successfully uninstalled h5py-3.1.0
Attempting uninstall: gast
Found existing installation: gast 0.4.0
Uninstalling gast-0.4.0:
Successfully uninstalled gast-0.4.0
Attempting uninstall: tensorflow
Found existing installation: tensorflow 2.7.0
Uninstalling tensorflow-2.7.0:
Successfully uninstalled tensorflow-2.7.0
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
gym 0.17.3 requires cloudpickle<1.7.0,>=1.2.0, but you have cloudpickle 2.0.0 which is incompatible.
Successfully installed SimpleITK-2.1.1 cloudpickle-2.0.0 dask-2021.12.0 dask-jobqueue-0.7.3 distributed-2021.12.0 fsspec-2021.11.1 gast-0.3.3 h5py-2.10.0 imreg-dft-2.0.0 locket-0.2.1 opencv-python-headless-4.5.4.60 partd-1.2.0 plantcv-3.13.4 tensorboard-2.2.2 tensorflow-2.2.0 tensorflow-estimator-2.2.0
###Markdown
Alternative: Mount your drive and work with the information there
###Code
from google.colab import drive
drive.mount('/content/drive')
import os
path_code = "Path where the code is stored"
os.chdir(path_code)
!pip3 install -r microscopy-dl-suite-tf/dl-suite/requirements.txt
###Output
_____no_output_____
###Markdown
Download example dataIt will download the example data from a [Zenodo repository](https://zenodo.org/record/5777994) and unzip it.If you use this data, please cite the corresponding publication
###Code
!wget "https://zenodo.org/record/5777994/files/HT1080WT_PHASECONTRAST.zip?download=1"
!unzip HT1080WT_PHASECONTRAST.zip?download=1
###Output
Archive: HT1080WT_PHASECONTRAST.zip?download=1
creating: HT1080WT_PHASECONTRAST/
creating: HT1080WT_PHASECONTRAST/val/
inflating: HT1080WT_PHASECONTRAST/val/original_videos_ordered.csv
creating: HT1080WT_PHASECONTRAST/val/inputs/
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_018.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_022.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_025.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_021.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_026.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_023.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_024.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_027.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_020.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_019.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_017.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_016.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_015.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_014.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_013.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_012.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_008.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_011.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_010.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_007.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_009.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_006.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_004.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_005.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_002.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_003.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_001.tif
inflating: HT1080WT_PHASECONTRAST/val/inputs/raw_000.tif
creating: HT1080WT_PHASECONTRAST/val/labels/
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_025.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_019.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_021.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_022.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_012.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_023.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_026.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_020.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_027.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_024.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_018.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_015.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_017.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_016.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_013.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_014.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_005.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_011.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_007.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_009.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_010.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_006.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_008.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_003.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_004.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_002.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_000.tif
inflating: HT1080WT_PHASECONTRAST/val/labels/instance_ids_001.tif
creating: HT1080WT_PHASECONTRAST/val/stack2im/
creating: HT1080WT_PHASECONTRAST/val/stack2im/labels/
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_021.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_025.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_027.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_023.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_024.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_026.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_009.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_022.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_000.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_008.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_007.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_018.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_014.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_011.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_012.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_004.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_020.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_019.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_003.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_013.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_001.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_010.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_015.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_016.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_017.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_005.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_006.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/labels/instance_ids_002.tif
creating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_027.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_026.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_025.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_024.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_016.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_023.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_012.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_011.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_007.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_021.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_010.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_000.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_006.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_020.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_002.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_005.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_018.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_022.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_014.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_019.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_015.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_003.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_004.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_017.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_009.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_008.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_001.tif
inflating: HT1080WT_PHASECONTRAST/val/stack2im/inputs/raw_013.tif
creating: HT1080WT_PHASECONTRAST/train/
creating: HT1080WT_PHASECONTRAST/train/inputs/
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_7-1-14_001_xy004_435-449.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/10000_11-19-13_1002_xy008_266-268.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000 data_6-12-14_1_xy004_489-491.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-8_6-12-14_1_xy039_071-073.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000 data_6-15-14_1_xy003_282-284.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/10000_11-19-13_1002_xy009_267-269.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_5-31-14_1_xy015_160-162.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_7-1-14_001_xy004_154-163.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-8_6-12-14_1_xy041_083-097.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_6-15-14_1_xy001_399-413.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_12-5-12_1_xy28_397-411.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/10000_11-20-13_1003_xy010_091-105.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000 data_6-12-14_1_xy002_177-191.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_7-1-14_001_xy004_480-494.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_12-5-12_1_xy33_065-079.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_12-5-12_1_xy28023-037.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_6-15-14_1_xy001_102-121.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_6-15-14_1_xy005_072-086.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000 data_6-12-14_1_xy002_410-424.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_12-5-12_1_xy33_087-101.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_7-1-14_001_xy003_341-350.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-8_6-15-14_1_xy031_245-259.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_5-31-14_1_xy008_360-374.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_7-1-14_001_xy004_035-049.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_5-31-14_1_xy009_445-459.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/10000_11-19-13_1002_xy010_254-256.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000 data_6-15-14_1_xy004_078-092.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_12-5-12_1_xy34_219-233.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/10000_11-20-13_1003_xy020_001-014.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-8_6-12-14_1_xy038_269-283.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/Videos_IL-6_12-5-12_1_xy28_430-465.tif
inflating: HT1080WT_PHASECONTRAST/train/inputs/5000_7-1-14_001_xy002_350-364.tif
  inflating: HT1080WT_PHASECONTRAST/train/inputs/ ... (remaining training input stacks, .tif)
   creating: HT1080WT_PHASECONTRAST/train/labels/
  inflating: HT1080WT_PHASECONTRAST/train/labels/ ... (matching ground-truth label stacks, .tif)
   creating: HT1080WT_PHASECONTRAST/train/stack2im/
  inflating: HT1080WT_PHASECONTRAST/train/stack2im/videos2im_relation.csv
   creating: HT1080WT_PHASECONTRAST/train/stack2im/inputs/
  inflating: HT1080WT_PHASECONTRAST/train/stack2im/inputs/raw_001.tif ... raw_666.tif (666 single-frame inputs)
   creating: HT1080WT_PHASECONTRAST/train/stack2im/labels/
  inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_001.tif ... (per-frame instance ID masks)
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_188.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_189.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_190.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_191.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_192.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_193.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_194.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_195.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_196.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_197.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_198.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_199.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_200.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_201.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_202.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_203.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_204.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_205.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_206.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_207.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_208.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_209.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_210.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_211.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_212.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_213.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_214.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_215.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_216.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_217.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_218.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_219.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_220.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_221.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_222.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_223.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_224.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_225.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_226.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_227.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_228.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_229.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_230.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_231.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_232.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_233.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_234.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_235.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_236.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_237.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_238.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_239.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_240.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_241.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_242.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_243.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_244.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_245.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_246.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_247.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_248.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_249.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_250.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_251.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_252.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_253.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_254.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_255.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_256.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_257.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_258.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_259.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_260.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_261.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_262.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_263.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_264.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_265.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_266.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_267.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_268.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_269.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_270.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_271.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_272.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_273.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_274.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_275.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_276.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_277.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_278.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_279.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_280.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_281.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_282.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_283.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_284.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_285.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_286.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_287.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_288.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_289.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_290.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_291.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_292.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_293.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_294.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_295.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_296.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_297.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_298.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_299.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_300.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_301.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_302.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_303.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_304.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_305.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_306.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_307.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_308.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_309.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_310.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_311.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_312.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_313.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_314.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_315.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_316.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_317.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_318.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_319.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_320.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_321.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_322.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_323.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_324.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_325.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_326.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_327.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_328.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_329.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_330.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_331.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_332.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_333.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_334.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_335.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_336.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_337.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_338.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_339.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_340.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_341.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_342.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_343.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_344.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_345.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_346.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_347.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_348.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_349.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_350.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_351.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_352.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_353.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_354.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_355.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_358.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_359.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_360.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_361.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_362.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_363.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_364.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_365.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_366.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_367.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_368.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_369.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_370.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_371.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_372.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_373.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_374.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_375.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_376.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_377.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_378.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_379.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_380.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_381.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_382.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_383.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_384.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_385.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_386.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_387.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_388.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_389.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_390.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_391.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_392.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_393.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_394.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_395.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_396.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_397.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_398.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_399.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_400.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_401.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_402.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_403.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_404.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_405.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_406.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_407.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_408.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_409.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_410.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_411.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_412.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_413.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_414.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_415.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_416.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_417.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_418.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_419.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_420.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_421.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_422.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_423.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_424.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_425.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_426.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_427.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_428.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_429.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_430.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_431.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_432.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_433.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_434.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_435.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_436.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_437.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_438.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_439.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_440.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_441.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_442.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_443.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_444.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_445.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_446.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_447.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_448.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_449.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_450.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_451.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_452.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_453.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_454.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_455.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_456.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_457.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_458.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_459.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_460.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_461.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_462.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_463.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_464.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_465.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_466.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_467.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_468.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_469.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_470.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_471.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_472.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_473.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_474.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_475.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_476.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_477.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_478.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_479.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_480.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_481.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_482.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_483.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_484.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_485.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_486.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_487.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_488.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_489.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_490.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_491.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_492.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_493.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_494.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_495.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_496.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_497.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_498.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_499.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_500.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_501.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_502.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_503.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_504.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_505.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_506.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_507.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_508.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_509.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_510.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_511.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_512.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_513.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_514.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_515.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_516.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_517.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_518.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_519.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_520.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_521.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_522.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_523.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_524.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_525.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_526.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_527.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_528.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_529.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_530.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_531.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_532.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_533.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_534.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_535.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_536.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_537.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_538.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_539.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_540.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_541.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_542.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_543.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_544.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_545.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_546.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_547.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_548.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_549.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_550.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_551.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_552.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_553.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_554.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_555.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_556.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_557.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_558.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_559.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_560.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_561.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_562.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_563.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_564.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_565.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_566.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_567.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_568.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_569.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_570.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_571.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_572.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_573.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_574.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_575.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_576.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_577.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_578.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_579.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_580.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_581.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_582.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_583.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_584.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_585.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_586.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_587.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_588.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_589.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_590.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_591.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_592.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_593.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_594.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_595.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_596.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_597.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_598.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_599.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_600.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_601.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_602.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_603.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_604.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_605.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_606.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_607.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_608.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_609.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_610.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_611.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_612.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_613.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_614.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_615.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_616.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_617.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_618.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_619.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_620.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_621.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_622.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_623.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_624.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_625.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_626.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_627.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_628.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_629.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_630.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_631.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_632.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_633.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_634.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_635.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_636.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_637.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_638.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_639.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_640.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_641.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_642.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_643.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_644.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_645.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_646.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_647.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_648.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_649.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_650.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_651.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_652.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_653.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_654.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_655.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_656.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_657.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_658.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_659.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_660.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_661.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_662.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_663.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_664.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_665.tif
inflating: HT1080WT_PHASECONTRAST/train/stack2im/labels/instance_ids_666.tif
creating: HT1080WT_PHASECONTRAST/test/
creating: HT1080WT_PHASECONTRAST/test/labels/
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_12-5-12_1_xy27_452-467.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/10000_11-20-13_1003_xy008_078-080.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_6-1-14_1001_xy28_330-332.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/5000_6-15-14_1_xy004_255-354.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/5000_7-1-14_001_xy005_157-171.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/10000_11-20-13_1003_xy015_002-004.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/10000_11-20-13_1003_xy018_267-281.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/10000_11-20-13_1003_xy018_289-312.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_12-5-12_1_xy27_001-049.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_12-5-12_1_xy27_306-325.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_12-5-12_1_xy27_340-355.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_12-5-12_1_xy27_370-385.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_12-5-12_1_xy30_040-064.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-6_12-5-12_1_xy30_090-104.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/Videos_IL-8_6-15-14_1_xy033_181-183.tif
inflating: HT1080WT_PHASECONTRAST/test/labels/5000_7-1-14_001_xy001_240-242.tif
creating: HT1080WT_PHASECONTRAST/test/stack2im/
inflating: HT1080WT_PHASECONTRAST/test/stack2im/videos2im_relation.csv
creating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_003.tif
... (inflating raw_001.tif through raw_275.tif, not in numeric order, plus raw_304.tif) ...
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_276.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_277.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_278.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_279.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_280.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_281.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_282.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_283.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_284.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_285.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_286.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_287.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_288.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_289.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_290.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_291.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_292.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_293.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_294.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_295.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_296.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_297.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_298.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_299.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_300.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_301.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_302.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_303.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_305.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_306.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_307.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_308.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_309.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_310.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_311.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_312.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_313.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_314.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_315.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_316.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_317.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_318.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_319.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_320.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_321.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_322.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_323.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_324.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_325.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/inputs/raw_326.tif
creating: HT1080WT_PHASECONTRAST/test/stack2im/labels/
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_001.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_002.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_003.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_004.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_005.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_006.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_007.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_008.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_009.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_010.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_011.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_012.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_013.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_014.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_015.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_016.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_017.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_018.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_019.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_020.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_021.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_022.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_023.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_024.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_025.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_026.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_027.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_028.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_029.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_030.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_031.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_032.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_033.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_034.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_035.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_036.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_037.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_038.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_039.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_040.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_041.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_042.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_043.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_044.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_045.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_046.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_047.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_048.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_049.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_050.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_051.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_052.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_053.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_054.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_055.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_056.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_057.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_058.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_059.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_060.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_061.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_062.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_063.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_064.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_065.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_066.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_067.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_068.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_069.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_070.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_071.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_072.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_073.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_074.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_075.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_076.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_077.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_078.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_079.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_080.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_081.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_082.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_083.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_084.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_085.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_086.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_087.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_088.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_089.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_090.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_091.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_092.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_093.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_094.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_095.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_096.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_097.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_098.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_099.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_100.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_101.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_102.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_103.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_104.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_105.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_106.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_107.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_108.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_109.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_110.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_111.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_112.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_113.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_114.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_115.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_116.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_117.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_118.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_119.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_120.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_121.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_122.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_123.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_124.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_125.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_126.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_127.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_128.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_129.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_130.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_131.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_132.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_133.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_134.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_135.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_136.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_137.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_138.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_139.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_140.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_141.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_142.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_143.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_144.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_145.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_146.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_147.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_148.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_149.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_150.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_151.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_152.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_153.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_154.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_155.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_156.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_157.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_158.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_159.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_160.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_161.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_162.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_163.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_164.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_165.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_166.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_167.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_168.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_169.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_170.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_171.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_172.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_173.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_174.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_175.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_176.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_177.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_178.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_179.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_180.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_181.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_182.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_183.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_184.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_185.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_186.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_187.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_188.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_189.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_190.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_191.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_192.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_193.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_194.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_195.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_196.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_197.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_198.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_199.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_200.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_201.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_202.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_203.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_204.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_205.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_206.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_207.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_208.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_209.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_210.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_211.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_212.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_213.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_214.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_215.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_216.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_217.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_218.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_219.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_220.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_221.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_222.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_223.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_224.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_225.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_226.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_227.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_228.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_229.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_230.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_231.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_232.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_233.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_234.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_235.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_236.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_237.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_238.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_239.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_240.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_241.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_242.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_243.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_244.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_245.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_246.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_247.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_248.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_249.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_250.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_251.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_252.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_253.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_254.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_255.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_256.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_257.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_258.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_259.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_260.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_261.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_262.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_263.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_264.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_265.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_266.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_267.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_268.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_269.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_270.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_271.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_272.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_273.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_274.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_275.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_276.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_277.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_278.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_279.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_280.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_281.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_282.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_283.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_284.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_285.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_286.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_287.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_288.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_289.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_290.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_291.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_292.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_293.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_294.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_295.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_296.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_297.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_298.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_299.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_300.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_301.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_302.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_303.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_304.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_305.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_306.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_307.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_308.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_309.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_310.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_311.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_312.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_313.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_314.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_315.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_316.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_317.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_318.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_319.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_320.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_321.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_322.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_323.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_324.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_325.tif
inflating: HT1080WT_PHASECONTRAST/test/stack2im/labels/instance_ids_326.tif
creating: HT1080WT_PHASECONTRAST/test/inputs/
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_6-1-14_1001_xy28_330-332.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/10000_11-20-13_1003_xy008_078-080.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_12-5-12_1_xy30_090-104.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/5000_7-1-14_001_xy001_240-242.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-8_6-15-14_1_xy033_181-183.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/10000_11-20-13_1003_xy015_002-004.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_12-5-12_1_xy27_452-467.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/5000_7-1-14_001_xy005_157-171.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_12-5-12_1_xy27_370-385.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/10000_11-20-13_1003_xy018_267-281.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_12-5-12_1_xy27_340-355.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_12-5-12_1_xy30_040-064.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_12-5-12_1_xy27_306-325.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/10000_11-20-13_1003_xy018_289-312.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/Videos_IL-6_12-5-12_1_xy27_001-049.tif
inflating: HT1080WT_PHASECONTRAST/test/inputs/5000_6-15-14_1_xy004_255-354.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg014.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg015.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg016.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg017.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg018.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/09_GT/SEG/man_seg019.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg014.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/11_GT/SEG/man_seg015.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/16_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/16_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/16_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/16_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/16_GT/SEG/man_seg002.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/15_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/15_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/15_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/15_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/15_GT/SEG/man_seg002.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/14_GT/SEG/man_seg014.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg014.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg015.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg016.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg017.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg018.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg019.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg020.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg021.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg022.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg023.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/13_GT/SEG/man_seg024.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg014.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/12_GT/SEG/man_seg015.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg014.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/10_GT/SEG/man_seg015.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg014.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg015.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg016.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg017.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg018.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg019.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg020.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg021.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg022.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg023.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg024.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg025.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg026.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg027.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg028.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg029.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg030.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg031.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg032.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg033.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg034.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg035.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg036.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg037.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg038.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg039.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg040.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg041.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg042.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg043.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg044.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg045.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg046.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg047.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/08_GT/SEG/man_seg048.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/07_GT/SEG/man_seg014.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/06_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/06_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/06_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/06_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/06_GT/SEG/man_seg002.tif
creating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/
creating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg000.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg001.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg002.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg003.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg004.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg005.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg006.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg007.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg008.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg009.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg010.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg011.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg012.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg013.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg014.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg015.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg016.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg017.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg018.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg019.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg020.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg021.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg022.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg023.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg024.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg025.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg026.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg027.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg028.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg029.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg030.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg031.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg032.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg033.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg034.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg035.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg036.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg037.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg038.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg039.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg040.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg041.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg042.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg043.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg044.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg045.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg046.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg047.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg048.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg049.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg050.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg051.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg052.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg053.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg054.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg055.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg056.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg057.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg058.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg059.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg060.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg061.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg062.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg063.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg064.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg065.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg066.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg067.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg068.tif
inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg069.tif
  inflating: HT1080WT_PHASECONTRAST/test/CTC-format/05_GT/SEG/man_seg070.tif
  [... extraction continues through the remaining CTC ground-truth segmentation masks: 05_GT (man_seg070-099.tif), 04_GT (man_seg000-023.tif), 03_GT (man_seg000-014.tif), 02_GT (man_seg000-002.tif) and 01_GT (man_seg000-002.tif) ...]
inflating: HT1080WT_PHASECONTRAST/README
###Markdown
Train: set up a new training from scratch
###Code
import json
import os

json_template = '/content/microscopy-dl-suite-tf/examples/config/config_template.json'
training_data = "/content/HT1080WT_PHASECONTRAST/train"
validation_data = "/content/HT1080WT_PHASECONTRAST/val"
test_data = "/content/HT1080WT_PHASECONTRAST/test"
# The test data is stored following the Cell Tracking Challenge order, so we
# need the mapping that relates each video stack to the single frames of the CTC GT.
test_data_video_relation = "/content/HT1080WT_PHASECONTRAST/test/stack2im/videos2im_relation.csv"
## OPTIONS FOR MODEL ARCHITECTURES. THE TRAINING DATA NEEDS TO CORRESPOND TO THE
## INPUT-OUTPUT FORMAT (2D IMAGES OR 2D VIDEO FILES)
# ---------------------------------------------------------
# - 'mobilenet_mobileunet_lstm': a pretrained MobileNet encoder with skip connections to the decoder of a MobileUNet and a ConvLSTM layer at the end that makes the entire architecture recurrent.
# - 'mobilenet_mobileunet': a pretrained MobileNet encoder with skip connections to the decoder of a MobileUNet (2D).
# - 'unet_lstm': 2D U-Net with ConvLSTM units in the contracting path.
# - 'categorical_unet_transpose': 2D U-Net for different labels ({0}, {1}, ...) with transpose convolutions instead of upsampling.
# - 'categorical_unet_fc_dil': 2D U-Net for different labels ({0}, {1}, ...) with fully connected dilated convolutions.
# - 'categorical_unet_fc': 2D U-Net for different labels ({0}, {1}, ...) with fully connected convolutions.
# - 'categorical_unet': 2D U-Net for different labels ({0}, {1}, ...).
# - 'unet' or "None": 2D U-Net with a single output.
model_architecture = "mobilenet_mobileunet_lstm"
## TRAINING PARAMETERS
training_epochs = 10
learning_rate = 0.00001
# 1 to freeze the pretrained MobileNet encoder and only train the decoder
# (transfer learning), 0 to train the entire network:
freeze_pretrained_encoder = 0
## TOTAL BATCH SIZE: patch_batch * batch_size
patch_batch = 1  # Number of patches cropped from each image on each epoch.
batch_size = 2   # Number of images to take on each epoch.
spatial_input_shape = [256, 256]
# CREATE THE CONFIGURATION FILE
with open(json_template, "r") as jsonFile:
    data = json.load(jsonFile)
data["cnn_name"] = model_architecture
data["TRAINPATH"] = training_data
data["TESTPATH"] = test_data
data["VALPATH"] = validation_data
data["PATH2VIDEOS"] = test_data_video_relation
data["model_train_decoder_only"] = freeze_pretrained_encoder
data["model_lr"] = learning_rate
data["train_max_epochs"] = training_epochs
data["datagen_patch_batch"] = patch_batch
data["datagen_batch_size"] = batch_size
data["datagen_dim_size"] = spatial_input_shape
# CHOOSE A SEED FOR THE MODEL INITIALIZATION AND THE DATA GENERATOR TO
# REPRODUCE THE RESULTS
data["model_seed_initializer"] = 11
data["train_seed_initializer"] = 2
# CHOOSE THE PATH TO STORE THE MODEL
main_path = "/content/"
output_path = os.path.join(main_path, "my_fine_tuned_model")
if not os.path.exists(output_path):
    os.mkdir(output_path)
data["OUTPUTPATH"] = output_path
# Save the configuration twice: one copy in /content (this is the file passed
# to train.py below) and one copy next to the trained model for reproducibility.
new_json = os.path.join(output_path, "my_config.json")
with open('/content/my_config.json', "w") as jsonFile:
    json.dump(data, jsonFile)
with open(new_json, "w") as jsonFile:
    json.dump(data, jsonFile)
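# A minimal optional sanity check (added sketch, not part of the original
# pipeline): re-read the config we just wrote and print the fields that
# train.py will consume, so a typo in a path is caught before training starts.
with open('/content/my_config.json', "r") as jsonFile:
    check = json.load(jsonFile)
for key in ["cnn_name", "TRAINPATH", "VALPATH", "TESTPATH", "OUTPUTPATH",
            "train_max_epochs", "datagen_batch_size", "datagen_dim_size"]:
    print(f"{key}: {check[key]}")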
!python /content/microscopy-dl-suite-tf/dl-suite/train.py '/content/my_config.json'
###Output
Seed 11 fixed for model initialization
{'n_filters': 16, 'pools': 3, 'kernel_size': [3, 3], 'dilation_rate': 2, 'mobile_alpha': 0.35, 'time_windows': 5, 'lr': 1e-05, 'dropout': 0.01, 'activation': 'elu', 'last_activation': 'tanh', 'padding': 'same', 'kernel_initializer': 'glorot_uniform', 'lossfunction': 'sparse_cce', 'loss_tips': 'L1L2', 'metrics': 'accuracy', 'train_decoder_only': 0, 'category_weights': [1, 10], 'seed_initializer': 11}
WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.
[... TensorFlow start-up logs omitted: CUDA/cuDNN dynamic libraries opened successfully, repeated non-fatal NUMA-node warnings, and XLA services initialized for the Host and CUDA platforms ...]
2021-12-14 20:40:42.104929: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1247] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 15064 MB memory) -> physical GPU (device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0, compute capability: 6.0)
Model: "model_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_image (InputLayer) [(None, None, None, None, 0
_________________________________________________________________
time_distributed (TimeDistri (None, None, None, None, 38688
_________________________________________________________________
conv_lst_m2d (ConvLSTM2D) (None, None, None, 16) 18496
_________________________________________________________________
tf_op_layer_ExpandDims (Tens [(None, 1, None, None, 16 0
_________________________________________________________________
time_distributed_1 (TimeDist (None, 1, None, None, 2) 34
=================================================================
Total params: 57,218
Trainable params: 55,026
Non-trainable params: 2,192
_________________________________________________________________
U-Net with MobileNetV2 encoder and MobileDecoder with ConvLSTM2D for segmentation.
Seed 2 fixed for training data generator
2021-12-14 20:40:49.058302: I tensorflow/core/profiler/lib/profiler_session.cc:159] Profiler session started.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
[... the two TIFFReadDirectory warnings above recur on every batch, and TensorFlow prints a few non-fatal grappler "layout failed: ... detected edge(s) creating cycle(s)" errors around the ConvLSTM while-loop; those repeats and the per-step progress lines are omitted below, keeping the end-of-epoch metrics ...]
Epoch 1/10
20/20 [==============================] - 32s 2s/step - loss: 0.7263 - jaccard_sparse3D: 0.0187 - accuracy: 0.4867 - val_loss: 0.7144 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.1184
Epoch 2/10
20/20 [==============================] - 31s 2s/step - loss: 0.7084 - jaccard_sparse3D: 0.0168 - accuracy: 0.5127 - val_loss: 0.7144 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.1596
Epoch 3/10
20/20 [==============================] - 31s 2s/step - loss: 0.6920 - jaccard_sparse3D: 0.0128 - accuracy: 0.5365 - val_loss: 0.7137 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.2080
Epoch 4/10
20/20 [==============================] - 31s 2s/step - loss: 0.6761 - jaccard_sparse3D: 0.0100 - accuracy: 0.5612 - val_loss: 0.7120 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.2651
Epoch 5/10
20/20 [==============================] - 31s 2s/step - loss: 0.6592 - jaccard_sparse3D: 0.0106 - accuracy: 0.5867 - val_loss: 0.7089 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.3292
Epoch 6/10
20/20 [==============================] - 31s 2s/step - loss: 0.6436 - jaccard_sparse3D: 0.0075 - accuracy: 0.6101 - val_loss: 0.7041 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.3985
Epoch 7/10
20/20 [==============================] - 31s 2s/step - loss: 0.6281 - jaccard_sparse3D: 0.0065 - accuracy: 0.6333 - val_loss: 0.6974 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.4725
Epoch 8/10
20/20 [==============================] - 31s 2s/step - loss: 0.6133 - jaccard_sparse3D: 0.0054 - accuracy: 0.6554 - val_loss: 0.6881 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.5471
Epoch 9/10
20/20 [==============================] - 31s 2s/step - loss: 0.5968 - jaccard_sparse3D: 0.0044 - accuracy: 0.6766 - val_loss: 0.6767 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.6161
Epoch 10/10
20/20 [==============================] - 30s 1s/step - loss: 0.5810 - jaccard_sparse3D: 0.0028 - accuracy: 0.6973 - val_loss: 0.6622 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.6808
[960, 960]
[115, 115]
Processing images from /content/HT1080WT_PHASECONTRAST/test
Processing video 10000_11-20-13_1003_xy008_078-080.tif:
Processing image raw_001
2021-12-14 20:46:26.598043: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:563] layout failed: Invalid argument: MutableGraphView::SortTopologically error: detected edge(s) creating cycle(s) {'model_3/conv_lst_m2d/while/body/_1/Elu_1' -> 'model_3/conv_lst_m2d/while/body/_1/mul_5', 'Func/model_3/conv_lst_m2d/while/body/_1/input/_68' -> 'model_3/conv_lst_m2d/while/body/_1/mul_2', 'model_3/conv_lst_m2d/while/body/_1/convolution_7' -> 'model_3/conv_lst_m2d/while/body/_1/add_6'}.
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
[... every frame is split into the same 2x2 grid of crops, so the "column"/"row" lines above repeat verbatim for frames raw_002 - raw_253 and are omitted here. The remaining test videos processed are: 10000_11-20-13_1003_xy015_002-004.tif (raw_004 - raw_006), 10000_11-20-13_1003_xy018_267-281.tif (raw_007 - raw_021), 10000_11-20-13_1003_xy018_289-312.tif (raw_022 - raw_045), 5000_6-15-14_1_xy004_255-354.tif (raw_046 - raw_145), 5000_7-1-14_001_xy001_240-242.tif (raw_146 - raw_148), 5000_7-1-14_001_xy005_157-171.tif (raw_149 - raw_163), Videos_IL-6_12-5-12_1_xy27_001-049.tif (raw_164 - raw_212), Videos_IL-6_12-5-12_1_xy27_306-325.tif (raw_213 - raw_232), Videos_IL-6_12-5-12_1_xy27_340-355.tif (raw_233 - raw_248) and Videos_IL-6_12-5-12_1_xy27_370-385.tif (raw_249 onwards) ...]
Processing image raw_254
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_255
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_256
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_257
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_258
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_259
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_260
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_261
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_262
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_263
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_264
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-6_12-5-12_1_xy27_452-467.tif:
Processing image raw_265
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_266
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_267
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_268
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_269
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_270
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_271
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_272
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_273
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_274
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_275
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_276
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_277
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_278
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_279
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_280
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-6_12-5-12_1_xy30_040-064.tif:
Processing image raw_281
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_282
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_283
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_284
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_285
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_286
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_287
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_288
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_289
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_290
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_291
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_292
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_293
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_294
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_295
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_296
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_297
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_298
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_299
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_300
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_301
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_302
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_303
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_304
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_305
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-6_12-5-12_1_xy30_090-104.tif:
Processing image raw_306
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_307
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_308
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_309
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_310
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_311
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_312
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_313
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_314
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_315
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_316
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_317
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_318
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_319
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_320
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-6_6-1-14_1001_xy28_330-332.tif:
Processing image raw_321
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_322
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_323
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-8_6-15-14_1_xy033_181-183.tif:
Processing image raw_324
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_325
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_326
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video 10000_11-20-13_1003_xy008_078-080:
[---] 100.0%
Processing video 10000_11-20-13_1003_xy015_002-004:
[---] 100.0%
Processing video 10000_11-20-13_1003_xy018_267-281:
[---------------] 100.0%
Processing video 10000_11-20-13_1003_xy018_289-312:
[------------------------] 100.0%
Processing video 5000_6-15-14_1_xy004_255-354:
[----------------------------------------------------------------------------------------------------] 100.0%
Processing video 5000_7-1-14_001_xy001_240-242:
[---] 100.0%
Processing video 5000_7-1-14_001_xy005_157-171:
[---------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_001-049:
[-------------------------------------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_306-325:
[--------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_340-355:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_370-385:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_452-467:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy30_040-064:
[-------------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy30_090-104:
[---------------] 100.0%
Processing video Videos_IL-6_6-1-14_1001_xy28_330-332:
[---] 100.0%
Processing video Videos_IL-8_6-15-14_1_xy033_181-183:
[---] 100.0%
All videos have been reconstructed
Processing video 10000_11-20-13_1003_xy008_078-080:
[---] 100.0%
Processing video 10000_11-20-13_1003_xy015_002-004:
[---] 100.0%
Processing video 10000_11-20-13_1003_xy018_267-281:
[---------------] 100.0%
Processing video 10000_11-20-13_1003_xy018_289-312:
[------------------------] 100.0%
Processing video 5000_6-15-14_1_xy004_255-354:
[----------------------------------------------------------------------------------------------------] 100.0%
Processing video 5000_7-1-14_001_xy001_240-242:
[---] 100.0%
Processing video 5000_7-1-14_001_xy005_157-171:
[---------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_001-049:
[-------------------------------------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_306-325:
[--------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_340-355:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_370-385:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_452-467:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy30_040-064:
[-------------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy30_090-104:
[---------------] 100.0%
Processing video Videos_IL-6_6-1-14_1001_xy28_330-332:
[---] 100.0%
Processing video Videos_IL-8_6-15-14_1_xy033_181-183:
[---] 100.0%
Results organizing finished.
###Markdown
Keep training a model from a certain checkpoint
We will reuse the same `my_config.json` as before (stored in the folder `my_fine_tuned_model`) and create a new folder, `my_new_fine_tuned_model`, to store the new results. The cell below loads the previous configuration, points `train_pretrained_weights` at the saved checkpoint, caps `train_max_epochs` at 5, and relaunches `train.py`.
###Code
# json and os are assumed to be imported in an earlier cell; re-importing is harmless
import json
import os

# READ THE PREVIOUS CONFIGURATION FILE
json_file_path = "/content/my_fine_tuned_model/my_config.json"
with open(json_file_path, "r") as jsonFile:
    data = json.load(jsonFile)

# POINT THE OUTPUT TO THE NEW MODEL FOLDER
output_path = "/content/my_new_fine_tuned_model"
data["OUTPUTPATH"] = output_path

# SET UP THE PATH FOR THE PRETRAINED MODEL (THE CHECKPOINT TO RESUME FROM)
data["train_pretrained_weights"] = "/content/my_fine_tuned_model/checkpoints/mobilenet_mobileunet_lstmlast.hdf5"
# CHANGE ANY ADDITIONAL SETTINGS FOR THE TRAINING
data["train_max_epochs"] = 5

if not os.path.exists(output_path):
    os.mkdir(output_path)

# STORE THE CONFIGURATION FILE IN THE NEW MODEL FOLDER
new_json = os.path.join(output_path, "my_config.json")
with open(new_json, "w") as jsonFile:
    json.dump(data, jsonFile)

# STORE A COPY IN THE CURRENT FOLDER TO PASS TO train.py
with open('/content/my_config.json', "w") as jsonFile:
    json.dump(data, jsonFile)

!python /content/microscopy-dl-suite-tf/dl-suite/train.py '/content/my_config.json'
###Output
Seed 11 fixed for model initialization
{'n_filters': 16, 'pools': 3, 'kernel_size': [3, 3], 'dilation_rate': 2, 'mobile_alpha': 0.35, 'time_windows': 5, 'lr': 1e-05, 'dropout': 0.01, 'activation': 'elu', 'last_activation': 'tanh', 'padding': 'same', 'kernel_initializer': 'glorot_uniform', 'lossfunction': 'sparse_cce', 'loss_tips': 'L1L2', 'metrics': 'accuracy', 'train_decoder_only': 0, 'category_weights': [1, 10], 'seed_initializer': 11}
WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.
2021-12-14 21:06:51.205437: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1
2021-12-14 21:06:51.227707: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.228420: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1561] Found device 0 with properties:
pciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2021-12-14 21:06:51.234359: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2021-12-14 21:06:51.247151: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10
2021-12-14 21:06:51.253059: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10
2021-12-14 21:06:51.258262: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10
2021-12-14 21:06:51.268286: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10
2021-12-14 21:06:51.271254: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10
2021-12-14 21:06:51.285106: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
2021-12-14 21:06:51.285255: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.285925: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.286436: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1703] Adding visible gpu devices: 0
2021-12-14 21:06:51.286767: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2021-12-14 21:06:51.291902: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 2199995000 Hz
2021-12-14 21:06:51.292086: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55bda8646840 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-12-14 21:06:51.292125: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2021-12-14 21:06:51.536584: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.537591: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55bda8646680 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2021-12-14 21:06:51.537647: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Tesla P100-PCIE-16GB, Compute Capability 6.0
2021-12-14 21:06:51.537896: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.538513: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1561] Found device 0 with properties:
pciBusID: 0000:00:04.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0
coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s
2021-12-14 21:06:51.538596: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2021-12-14 21:06:51.538623: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10
2021-12-14 21:06:51.538661: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10
2021-12-14 21:06:51.538687: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10
2021-12-14 21:06:51.538717: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10
2021-12-14 21:06:51.538741: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10
2021-12-14 21:06:51.538767: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
2021-12-14 21:06:51.538856: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.539494: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.540042: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1703] Adding visible gpu devices: 0
2021-12-14 21:06:51.540109: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2021-12-14 21:06:51.541595: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1102] Device interconnect StreamExecutor with strength 1 edge matrix:
2021-12-14 21:06:51.541638: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1108] 0
2021-12-14 21:06:51.541654: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1121] 0: N
2021-12-14 21:06:51.541796: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.542434: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:981] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2021-12-14 21:06:51.543022: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:39] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
2021-12-14 21:06:51.543071: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1247] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 15064 MB memory) -> physical GPU (device: 0, name: Tesla P100-PCIE-16GB, pci bus id: 0000:00:04.0, compute capability: 6.0)
Model: "model_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_image (InputLayer) [(None, None, None, None, 0
_________________________________________________________________
time_distributed (TimeDistri (None, None, None, None, 38688
_________________________________________________________________
conv_lst_m2d (ConvLSTM2D) (None, None, None, 16) 18496
_________________________________________________________________
tf_op_layer_ExpandDims (Tens [(None, 1, None, None, 16 0
_________________________________________________________________
time_distributed_1 (TimeDist (None, 1, None, None, 2) 34
=================================================================
Total params: 57,218
Trainable params: 55,026
Non-trainable params: 2,192
_________________________________________________________________
U-Net with MobileNetV2 encoder and MobileDecoder with ConvLSTM2D for segmentation.
Loading weights from /content/my_fine_tuned_model/checkpoints/mobilenet_mobileunet_lstmlast.hdf5
Seed 2 fixed for training data generator
2021-12-14 21:06:58.869005: I tensorflow/core/profiler/lib/profiler_session.cc:159] Profiler session started.
2021-12-14 21:06:58.869104: I tensorflow/core/profiler/internal/gpu/cupti_tracer.cc:1363] Profiler found 1 GPUs
2021-12-14 21:06:58.880691: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcupti.so.10.1
2021-12-14 21:06:59.027998: I tensorflow/core/profiler/internal/gpu/cupti_tracer.cc:1479] CUPTI activity buffer flushed
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
Epoch 1/5
2021-12-14 21:07:07.895204: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:563] layout failed: Invalid argument: MutableGraphView::SortTopologically error: detected edge(s) creating cycle(s) {'model_3/conv_lst_m2d/while/body/_1/mul_2' -> 'model_3/conv_lst_m2d/while/body/_1/add_5', 'model_3/conv_lst_m2d/while/body/_1/convolution_4' -> 'model_3/conv_lst_m2d/while/body/_1/add', 'model_3/conv_lst_m2d/while/body/_1/Elu' -> 'model_3/conv_lst_m2d/while/body/_1/mul_3', 'model_3/conv_lst_m2d/while/body/_1/clip_by_value_2' -> 'model_3/conv_lst_m2d/while/body/_1/mul_5'}.
2021-12-14 21:07:09.449377: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
2021-12-14 21:07:10.695469: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10
2021-12-14 21:07:11.491739: I tensorflow/core/profiler/lib/profiler_session.cc:159] Profiler session started.
1/20 [>.............................] - ETA: 0s - loss: 0.5730 - jaccard_sparse3D: 0.0000e+00 - accuracy: 0.7639
2021-12-14 21:07:11.860700: I tensorflow/core/profiler/internal/gpu/cupti_tracer.cc:1479] CUPTI activity buffer flushed
2021-12-14 21:07:11.861833: I tensorflow/core/profiler/internal/gpu/device_tracer.cc:216] GpuTracer has collected 2384 callback api events and 2384 activity events.
2021-12-14 21:07:11.945327: I tensorflow/core/profiler/rpc/client/save_profile.cc:168] Creating directory: /content/my_new_fine_tuned_model/logs/train/plugins/profile/2021_12_14_21_07_11
2021-12-14 21:07:11.983730: I tensorflow/core/profiler/rpc/client/save_profile.cc:174] Dumped gzipped tool data for trace.json.gz to /content/my_new_fine_tuned_model/logs/train/plugins/profile/2021_12_14_21_07_11/38285312d993.trace.json.gz
2021-12-14 21:07:12.006738: I tensorflow/core/profiler/utils/event_span.cc:288] Generation of step-events took 0.747 ms
2021-12-14 21:07:12.012594: I tensorflow/python/profiler/internal/profiler_wrapper.cc:87] Creating directory: /content/my_new_fine_tuned_model/logs/train/plugins/profile/2021_12_14_21_07_11
Dumped tool data for overview_page.pb to /content/my_new_fine_tuned_model/logs/train/plugins/profile/2021_12_14_21_07_11/38285312d993.overview_page.pb
Dumped tool data for input_pipeline.pb to /content/my_new_fine_tuned_model/logs/train/plugins/profile/2021_12_14_21_07_11/38285312d993.input_pipeline.pb
Dumped tool data for tensorflow_stats.pb to /content/my_new_fine_tuned_model/logs/train/plugins/profile/2021_12_14_21_07_11/38285312d993.tensorflow_stats.pb
Dumped tool data for kernel_stats.pb to /content/my_new_fine_tuned_model/logs/train/plugins/profile/2021_12_14_21_07_11/38285312d993.kernel_stats.pb
2/20 [==>...........................] - ETA: 4s - loss: 0.5711 - jaccard_sparse3D: 4.7081e-04 - accuracy: 0.7435
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
3/20 [===>..........................] - ETA: 11s - loss: 0.5726 - jaccard_sparse3D: 7.9813e-04 - accuracy: 0.7331
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
9/20 [============>.................] - ETA: 14s - loss: 0.5693 - jaccard_sparse3D: 0.0016 - accuracy: 0.7184
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
14/20 [====================>.........] - ETA: 8s - loss: 0.5686 - jaccard_sparse3D: 0.0017 - accuracy: 0.7169
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
20/20 [==============================] - ETA: 0s - loss: 0.5660 - jaccard_sparse3D: 0.0023 - accuracy: 0.7202
2021-12-14 21:07:45.860572: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:563] layout failed: Invalid argument: MutableGraphView::SortTopologically error: detected edge(s) creating cycle(s) {'model_3/conv_lst_m2d/while/body/_1/mul_2' -> 'model_3/conv_lst_m2d/while/body/_1/add_5', 'model_3/conv_lst_m2d/while/model_3/conv_lst_m2d/convolution/BatchToSpaceND_switch/_26-0-TransposeNHWCToNCHW-LayoutOptimizer' -> 'model_3/conv_lst_m2d/while/model_3/conv_lst_m2d/convolution/BatchToSpaceND_switch/_26'}.
2021-12-14 21:07:48.511777: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:563] layout failed: Invalid argument: MutableGraphView::SortTopologically error: detected edge(s) creating cycle(s) {'model_3/conv_lst_m2d/while/body/_1/Elu_1' -> 'model_3/conv_lst_m2d/while/body/_1/mul_5', 'Func/model_3/conv_lst_m2d/while/body/_1/input/_68' -> 'model_3/conv_lst_m2d/while/body/_1/mul_2', 'model_3/conv_lst_m2d/while/body/_1/convolution_7' -> 'model_3/conv_lst_m2d/while/body/_1/add_6'}.
20/20 [==============================] - 37s 2s/step - loss: 0.5660 - jaccard_sparse3D: 0.0023 - accuracy: 0.7202 - val_loss: 0.6447 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.7393
Epoch 2/5
1/20 [>.............................] - ETA: 0s - loss: 0.5561 - jaccard_sparse3D: 0.0019 - accuracy: 0.7281
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
4/20 [=====>........................] - ETA: 18s - loss: 0.5561 - jaccard_sparse3D: 0.0026 - accuracy: 0.7294
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
15/20 [=====================>........] - ETA: 7s - loss: 0.5526 - jaccard_sparse3D: 0.0027 - accuracy: 0.7351
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
19/20 [===========================>..] - ETA: 1s - loss: 0.5524 - jaccard_sparse3D: 0.0023 - accuracy: 0.7378
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
20/20 [==============================] - 30s 1s/step - loss: 0.5518 - jaccard_sparse3D: 0.0022 - accuracy: 0.7386 - val_loss: 0.6240 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.7912
Epoch 3/5
2/20 [==>...........................] - ETA: 11s - loss: 0.5397 - jaccard_sparse3D: 0.0000e+00 - accuracy: 0.7411
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
6/20 [========>.....................] - ETA: 17s - loss: 0.5405 - jaccard_sparse3D: 4.4523e-04 - accuracy: 0.7480
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
7/20 [=========>....................] - ETA: 16s - loss: 0.5403 - jaccard_sparse3D: 5.9342e-04 - accuracy: 0.7490
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
13/20 [==================>...........] - ETA: 10s - loss: 0.5376 - jaccard_sparse3D: 0.0018 - accuracy: 0.7527
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
20/20 [==============================] - 31s 2s/step - loss: 0.5356 - jaccard_sparse3D: 0.0018 - accuracy: 0.7555 - val_loss: 0.5990 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.8353
Epoch 4/5
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
15/20 [=====================>........] - ETA: 6s - loss: 0.5253 - jaccard_sparse3D: 0.0019 - accuracy: 0.7692
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
16/20 [=======================>......] - ETA: 5s - loss: 0.5249 - jaccard_sparse3D: 0.0020 - accuracy: 0.7697
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
20/20 [==============================] - 30s 2s/step - loss: 0.5229 - jaccard_sparse3D: 0.0022 - accuracy: 0.7709 - val_loss: 0.5697 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.8742
Epoch 5/5
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
6/20 [========>.....................] - ETA: 17s - loss: 0.5083 - jaccard_sparse3D: 0.0020 - accuracy: 0.7824
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
10/20 [==============>...............] - ETA: 13s - loss: 0.5079 - jaccard_sparse3D: 0.0015 - accuracy: 0.7824
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
14/20 [====================>.........] - ETA: 8s - loss: 0.5104 - jaccard_sparse3D: 0.0018 - accuracy: 0.7832
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50838 (0xc696) encountered.
TIFFReadDirectory: Warning, Unknown field with tag 50839 (0xc697) encountered.
20/20 [==============================] - 30s 2s/step - loss: 0.5074 - jaccard_sparse3D: 0.0018 - accuracy: 0.7848 - val_loss: 0.5350 - val_jaccard_sparse3D: 0.0000e+00 - val_accuracy: 0.9054
[960, 960]
[115, 115]
Processing images from /content/HT1080WT_PHASECONTRAST/test
Processing video 10000_11-20-13_1003_xy008_078-080.tif:
2021-12-14 21:09:58.334231: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:563] layout failed: Invalid argument: MutableGraphView::SortTopologically error: detected edge(s) creating cycle(s) {'model_3/conv_lst_m2d/while/body/_1/Elu_1' -> 'model_3/conv_lst_m2d/while/body/_1/mul_5', 'model_3/conv_lst_m2d/while/body/_1/mul_2' -> 'model_3/conv_lst_m2d/while/body/_1/add_5', 'model_3/conv_lst_m2d/while/body/_1/convolution_7' -> 'model_3/conv_lst_m2d/while/body/_1/add_6'}.
Processing images raw_001 through raw_003 (each image again predicted as a 2x2 tile grid: rows 0-1, columns 0-1)
Processing video 10000_11-20-13_1003_xy015_002-004.tif:
Processing images raw_004 through raw_006
Processing video 10000_11-20-13_1003_xy018_267-281.tif:
Processing images raw_007 through raw_021
Processing video 10000_11-20-13_1003_xy018_289-312.tif:
Processing images raw_022 through raw_045
Processing video 5000_6-15-14_1_xy004_255-354.tif:
Processing images raw_046 through raw_145
Processing video 5000_7-1-14_001_xy001_240-242.tif:
Processing images raw_146 through raw_148
Processing video 5000_7-1-14_001_xy005_157-171.tif:
Processing images raw_149 through raw_163
Processing video Videos_IL-6_12-5-12_1_xy27_001-049.tif:
Processing images raw_164 through raw_212
Processing video Videos_IL-6_12-5-12_1_xy27_306-325.tif:
Processing images raw_213 through raw_232
Processing video Videos_IL-6_12-5-12_1_xy27_340-355.tif:
Processing images raw_233 through raw_248
Processing video Videos_IL-6_12-5-12_1_xy27_370-385.tif:
Processing images raw_249 through raw_264
Processing video Videos_IL-6_12-5-12_1_xy27_452-467.tif:
Processing images raw_265 through raw_269
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_270
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_271
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_272
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_273
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_274
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_275
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_276
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_277
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_278
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_279
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_280
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-6_12-5-12_1_xy30_040-064.tif:
Processing image raw_281
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_282
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_283
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_284
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_285
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_286
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_287
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_288
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_289
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_290
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_291
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_292
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_293
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_294
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_295
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_296
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_297
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_298
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_299
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_300
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_301
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_302
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_303
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_304
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_305
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-6_12-5-12_1_xy30_090-104.tif:
Processing image raw_306
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_307
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_308
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_309
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_310
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_311
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_312
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_313
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_314
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_315
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_316
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_317
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_318
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_319
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_320
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-6_6-1-14_1001_xy28_330-332.tif:
Processing image raw_321
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_322
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_323
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video Videos_IL-8_6-15-14_1_xy033_181-183.tif:
Processing image raw_324
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_325
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing image raw_326
column - 0
column - 1
row - 0
column - 0
column - 1
row - 1
Processing video 10000_11-20-13_1003_xy008_078-080:
[---] 100.0%
Processing video 10000_11-20-13_1003_xy015_002-004:
[---] 100.0%
Processing video 10000_11-20-13_1003_xy018_267-281:
[---------------] 100.0%
Processing video 10000_11-20-13_1003_xy018_289-312:
[------------------------] 100.0%
Processing video 5000_6-15-14_1_xy004_255-354:
[----------------------------------------------------------------------------------------------------] 100.0%
Processing video 5000_7-1-14_001_xy001_240-242:
[---] 100.0%
Processing video 5000_7-1-14_001_xy005_157-171:
[---------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_001-049:
[-------------------------------------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_306-325:
[--------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_340-355:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_370-385:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy27_452-467:
[----------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy30_040-064:
[-------------------------] 100.0%
Processing video Videos_IL-6_12-5-12_1_xy30_090-104:
[---------------] 100.0%
Processing video Videos_IL-6_6-1-14_1001_xy28_330-332:
[---] 100.0%
Processing video Videos_IL-8_6-15-14_1_xy033_181-183:
[---] 100.0%
All videos have been reconstructed
[progress log condensed: the same 16 videos listed above were each re-processed to 100.0% while organizing results]
Results organizing finished.
###Markdown
Run the training using a .json file that contains all the details
###Code
!python /content/microscopy-dl-suite-tf/dl-suite/train.py '/content/microscopy-dl-suite-tf/examples/config/config_template.json'
###Output
_____no_output_____ |
examples/tutorials/basic_ptmcmc_tutorial.ipynb | ###Markdown
This notebook will show how to use the PTMCMCSampler; in particular, this will highlight how to add custom jump proposals. Create a 150914-like injection
###Code
# Set the duration and sampling frequency of the data segment that we're
# going to inject the signal into
duration = 4.
sampling_frequency = 2048.
# Specify the output directory and the name of the simulation.
outdir = 'outdir'
label = 'basic_tutorial4'
bilby.core.utils.setup_logger(outdir=outdir, label=label)
# Set up a random seed for result reproducibility. This is optional!
np.random.seed(88170235)
# We are going to inject a binary black hole waveform. We first establish a
# dictionary of parameters that includes all of the different waveform
# parameters, including masses of the two black holes (mass_1, mass_2),
# spins of both black holes (a, tilt, phi), etc.
injection_parameters = dict(
mass_1=36., mass_2=29., a_1=0.4, a_2=0.3, tilt_1=0.5, tilt_2=1.0,
phi_12=1.7, phi_jl=0.3, luminosity_distance=2000., iota=0.4, psi=2.659,
phase=1.3, geocent_time=1126259642.413, ra=1.375, dec=-1.2108)
# Fixed arguments passed into the source model
waveform_arguments = dict(waveform_approximant='IMRPhenomP',
reference_frequency=50., minimum_frequency=20.)
###Output
_____no_output_____
###Markdown
Inject into data
###Code
# Create the waveform_generator using a LAL BinaryBlackHole source function
waveform_generator = bilby.gw.WaveformGenerator(
duration=duration, sampling_frequency=sampling_frequency,
frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
waveform_arguments=waveform_arguments)
# Set up interferometers. In this case we'll use two interferometers
# (LIGO-Hanford (H1), LIGO-Livingston (L1). These default to their design
# sensitivity
ifos = bilby.gw.detector.InterferometerList(['H1', 'L1'])
ifos.set_strain_data_from_power_spectral_densities(
sampling_frequency=sampling_frequency, duration=duration,
start_time=injection_parameters['geocent_time'] - 3)
ifos.inject_signal(waveform_generator=waveform_generator,
parameters=injection_parameters)
###Output
/home/c1572221/src/bilby/bilby/gw/detector.py:1986: RuntimeWarning: invalid value encountered in multiply
frequency_domain_strain = self.__power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
14:36 bilby INFO : Injected signal in H1:
14:36 bilby INFO : optimal SNR = 12.09
14:36 bilby INFO : matched filter SNR = 10.88-0.37j
14:36 bilby INFO : luminosity_distance = 2000.0
14:36 bilby INFO : psi = 2.659
14:36 bilby INFO : a_2 = 0.3
14:36 bilby INFO : a_1 = 0.4
14:36 bilby INFO : geocent_time = 1126259642.41
14:36 bilby INFO : tilt_2 = 1.0
14:36 bilby INFO : phi_jl = 0.3
14:36 bilby INFO : ra = 1.375
14:36 bilby INFO : phase = 1.3
14:36 bilby INFO : mass_2 = 29.0
14:36 bilby INFO : mass_1 = 36.0
14:36 bilby INFO : phi_12 = 1.7
14:36 bilby INFO : dec = -1.2108
14:36 bilby INFO : iota = 0.4
14:36 bilby INFO : tilt_1 = 0.5
14:36 bilby INFO : Injected signal in L1:
14:36 bilby INFO : optimal SNR = 9.79
14:36 bilby INFO : matched filter SNR = 10.00+0.07j
14:36 bilby INFO : luminosity_distance = 2000.0
14:36 bilby INFO : psi = 2.659
14:36 bilby INFO : a_2 = 0.3
14:36 bilby INFO : a_1 = 0.4
14:36 bilby INFO : geocent_time = 1126259642.41
14:36 bilby INFO : tilt_2 = 1.0
14:36 bilby INFO : phi_jl = 0.3
14:36 bilby INFO : ra = 1.375
14:36 bilby INFO : phase = 1.3
14:36 bilby INFO : mass_2 = 29.0
14:36 bilby INFO : mass_1 = 36.0
14:36 bilby INFO : phi_12 = 1.7
14:36 bilby INFO : dec = -1.2108
14:36 bilby INFO : iota = 0.4
14:36 bilby INFO : tilt_1 = 0.5
###Markdown
For simplicity, we will fix all parameters here to the injected values and only vary over mass_1 and mass_2.
###Code
priors = injection_parameters.copy()
priors['mass_1'] = bilby.prior.Uniform(name='mass_1', minimum=10, maximum=80, unit=r'$M_{\odot}$')
priors['mass_2'] = bilby.prior.Uniform(name='mass_2', minimum=10, maximum=80, unit=r'$M_{\odot}$')
###Output
_____no_output_____
###Markdown
Here we create arbitrary jump proposals. This will highlight the necessary features of a jump proposal in ptmcmc. That is, it takes the current position, x, then outputs a new position, q, and the jump probability, i.e. p(x -> q). These will then be passed to the standard Metropolis-Hastings condition. The two proposals below are probably not very good ones; ideally we would use proposals based upon our knowledge of the problem/parameter space. In general, for these proposals, lqxy will certainly not be 0
###Code
class UniformJump(object):
def __init__(self, pmin, pmax):
"""Draw random parameters from pmin, pmax"""
self.pmin = pmin
self.pmax = pmax
def unjump(self, x, it, beta):
"""
Function prototype must read in parameter vector x,
sampler iteration number it, and inverse temperature beta
"""
# log of forward-backward jump probability
lqxy = 0
        # uniformly drawn parameters
q = np.random.uniform(self.pmin, self.pmax, len(x))
return q, lqxy
class NormJump(object):
def __init__(self, step_size):
"""Draw random parameters from pmin, pmax"""
self.step_size = step_size
def normjump(self, x, it, beta):
"""
Function prototype must read in parameter vector x,
sampler iteration number it, and inverse temperature beta
"""
        # log of forward-backward jump probability; this is only zero for simple examples
        lqxy = 0
        # draw from a multivariate normal centred on the current position
q = np.random.multivariate_normal(x , self.step_size * np.eye(len(x)) , 1)
return q[0], lqxy
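# Illustrative sanity check (an addition, not part of the original tutorial):
# each proposal maps a position x to a proposed position q and log p(x -> q).
_x = np.array([36.0, 29.0])
print(UniformJump(20, 40).unjump(_x, it=0, beta=1.0))
print(NormJump(1).normjump(_x, it=0, beta=1.0))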
###Output
_____no_output_____
###Markdown
Below we create a dictionary containing our jump proposals and the relative weight of that proposal in the proposal cycle; these are then passed to bilby.run_sampler under the keyword argument custom_proposals
###Code
normjump = NormJump(1)
normweight = 5
ujump = UniformJump(20, 40)
uweight = 1
custom = {'uniform': [ujump.unjump , uweight],
'normal': [normjump.normjump , normweight]}
# Initialise the likelihood by passing in the interferometer data (ifos) and
# the waveform generator
likelihood = bilby.gw.GravitationalWaveTransient(
interferometers=ifos,waveform_generator=waveform_generator)
result = bilby.run_sampler(
    likelihood=likelihood, priors=priors, sampler='PTMCMCsampler', custom_proposals=custom, Niter=10**4)
result.plot_corner()
###Output
_____no_output_____
###Markdown
PTMCMC produces the acceptance rate for each of the proposals (including the ones built in). This is taken as an average at a specified checkpoint. This is one indicator of whether our jump proposal is a good one (acceptance rate is certainly not the only, or even the best, metric here; think of the exploration vs. exploitation problem).
###Code
sampler_meta = result.meta_data['sampler_meta']
jumps = sampler_meta['proposals']
plt.figure()
plt.xlabel('epoch')
plt.ylabel('acceptance rate')
for i,proposal in enumerate(jumps):
plt.plot(jumps[proposal] , label = proposal)
plt.legend(loc='best', frameon=True)
###Output
_____no_output_____
###Markdown
We can also generate the 1-d chains for each of the parameters, and the likelihood of those points on the chain
###Code
m2 = result.posterior.mass_2.values
m1 = result.posterior.mass_1.values
fig, ax = plt.subplots(nrows = 2 , ncols =1 , sharex = True , figsize = (8,8))
ax[0].plot(m1 , 'o', label = 'm1' )
ax[0].plot(m2 , 'o', label = 'm2' )
ax[0].set_ylabel(r'$M_{\odot}$')
ax[0].legend(loc = 'best' , frameon = True , fontsize = 12)
ax[1].plot(result.log_likelihood_evaluations)
ax[1].set_ylabel(r'$\mathcal{L}$')
ax[1].set_xlabel('iterations')
ax[1].set_xscale('log')
###Output
_____no_output_____ |
handson/pytorch/finetuning_torchvision_models_tutorial.ipynb | ###Markdown
Finetuning Torchvision Models=============================**Author:** [`Nathan Inkawhich`](https://github.com/inkawhich) In this tutorial we will take a deeper look at how to finetune and feature extract the [torchvision models](https://pytorch.org/docs/stable/torchvision/models.html), all of which have been pretrained on the 1000-class Imagenet dataset. This tutorial will give an in-depth look at how to work with several modern CNN architectures, and will build an intuition for finetuning any PyTorch model. Since each model architecture is different, there is no boilerplate finetuning code that will work in all scenarios. Rather, the researcher must look at the existing architecture and make custom adjustments for each model. In this document we will perform two types of transfer learning: finetuning and feature extraction. In **finetuning**, we start with a pretrained model and update *all* of the model’s parameters for our new task, in essence retraining the whole model. In **feature extraction**, we start with a pretrained model and only update the final layer weights from which we derive predictions. It is called feature extraction because we use the pretrained CNN as a fixed feature-extractor, and only change the output layer. For more technical information about transfer learning see [here](http://cs231n.github.io/transfer-learning/) and [here](http://ruder.io/transfer-learning/). In general both transfer learning methods follow the same few steps: - Initialize the pretrained model - Reshape the final layer(s) to have the same number of outputs as the number of classes in the new dataset - Define for the optimization algorithm which parameters we want to update during training - Run the training step
###Code
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
###Output
_____no_output_____
###Markdown
Inputs------Here are all of the parameters to change for the run. We will use the *hymenoptera_data* dataset which can be downloaded [here](https://download.pytorch.org/tutorial/hymenoptera_data.zip). This dataset contains two classes, **bees** and **ants**, and is structured such that we can use the [ImageFolder](https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.ImageFolder) dataset, rather than writing our own custom dataset. Download the data and set the ``data_dir`` input to the root directory of the dataset. The ``model_name`` input is the name of the model you wish to use and must be selected from this list: - resnet - alexnet - vgg - squeezenet - densenet - inception. The other inputs are as follows: ``num_classes`` is the number of classes in the dataset, ``batch_size`` is the batch size used for training and may be adjusted according to the capability of your machine, ``num_epochs`` is the number of training epochs we want to run, and ``feature_extract`` is a boolean that defines if we are finetuning or feature extracting. If ``feature_extract = False``, the model is finetuned and all model parameters are updated. If ``feature_extract = True``, only the last layer parameters are updated, the others remain fixed.
###Code
!wget https://download.pytorch.org/tutorial/hymenoptera_data.zip
!unzip hymenoptera_data.zip
# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
data_dir = "./hymenoptera_data"
# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
model_name = "squeezenet"
# Number of classes in the dataset
num_classes = 2
# Batch size for training (change depending on how much memory you have)
batch_size = 8
# Number of epochs to train for
num_epochs = 15
# Flag for feature extracting. When False, we finetune the whole model,
# when True we only update the reshaped layer params
feature_extract = True
###Output
_____no_output_____
###Markdown
Helper Functions----------------Before we write the code for adjusting the models, let's define a few helper functions. Model Training and Validation Code: the ``train_model`` function handles the training and validation of a given model. As input, it takes a PyTorch model, a dictionary of dataloaders, a loss function, an optimizer, a specified number of epochs to train and validate for, and a boolean flag for when the model is an Inception model. The *is_inception* flag is used to accommodate the *Inception v3* model, as that architecture uses an auxiliary output and the overall model loss respects both the auxiliary output and the final output, as described [here](https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958). The function trains for the specified number of epochs and after each epoch runs a full validation step. It also keeps track of the best performing model (in terms of validation accuracy), and at the end of training returns the best performing model. After each epoch, the training and validation accuracies are printed.
###Code
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
since = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
# Get model outputs and calculate loss
# Special case for inception because in training it has an auxiliary output. In train
# mode we calculate the loss by summing the final output and the auxiliary output
# but in testing we only consider the final output.
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4*loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'val':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
###Output
_____no_output_____
###Markdown
Set Model Parameters’ .requires_grad attribute: this helper function sets the ``.requires_grad`` attribute of the parameters in the model to False when we are feature extracting. By default, when we load a pretrained model all of the parameters have ``.requires_grad=True``, which is fine if we are training from scratch or finetuning. However, if we are feature extracting and only want to compute gradients for the newly initialized layer then we want all of the other parameters to not require gradients. This will make more sense later.
###Code
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
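# Minimal illustration (added as an aside; not part of the original tutorial):
# freezing an untrained ResNet18 should leave no parameter requiring gradients.
_demo = models.resnet18(pretrained=False)
set_parameter_requires_grad(_demo, feature_extracting=True)
print(all(not p.requires_grad for p in _demo.parameters()))  # expect True
del _demo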
###Output
_____no_output_____
###Markdown
Initialize and Reshape the Networks-----------------------------------Now to the most interesting part. Here is where we handle the reshaping of each network. Note, this is not an automatic procedure and is unique to each model. Recall, the final layer of a CNN model, which is often times an FC layer, has the same number of nodes as the number of output classes in the dataset. Since all of the models have been pretrained on Imagenet, they all have output layers of size 1000, one node for each class. The goal here is to reshape the last layer to have the same number of inputs as before, AND to have the same number of outputs as the number of classes in the dataset. In the following sections we will discuss how to alter the architecture of each model individually. But first, there is one important detail regarding the difference between finetuning and feature-extraction. When feature extracting, we only want to update the parameters of the last layer, or in other words, we only want to update the parameters for the layer(s) we are reshaping. Therefore, we do not need to compute the gradients of the parameters that we are not changing, so for efficiency we set the .requires_grad attribute to False. This is important because by default, this attribute is set to True. Then, when we initialize the new layer, by default the new parameters have ``.requires_grad=True``, so only the new layer’s parameters will be updated. When we are finetuning we can leave all of the .requires_grad’s set to the default of True. Finally, notice that inception_v3 requires the input size to be (299,299), whereas all of the other models expect (224,224). Resnet: Resnet was introduced in the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385). There are several variants of different sizes, including Resnet18, Resnet34, Resnet50, Resnet101, and Resnet152, all of which are available from torchvision models. Here we use Resnet18, as our dataset is small and only has two classes. When we print the model, we see that the last layer is a fully connected layer as shown below:``` (fc): Linear(in_features=512, out_features=1000, bias=True) ```Thus, we must reinitialize ``model.fc`` to be a Linear layer with 512 input features and 2 output features with:``` model.fc = nn.Linear(512, num_classes)``` Alexnet: Alexnet was introduced in the paper [ImageNet Classification with Deep Convolutional Neural Networks](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf) and was the first very successful CNN on the ImageNet dataset. When we print the model architecture, we see the model output comes from the 6th layer of the classifier``` (classifier): Sequential( ... (6): Linear(in_features=4096, out_features=1000, bias=True) ) ```To use the model with our dataset we reinitialize this layer as``` model.classifier[6] = nn.Linear(4096,num_classes)``` VGG: VGG was introduced in the paper [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/pdf/1409.1556.pdf). Torchvision offers eight versions of VGG with various lengths and some that have batch normalization layers. Here we use VGG-11 with batch normalization. The output layer is similar to Alexnet, i.e.``` (classifier): Sequential( ... (6): Linear(in_features=4096, out_features=1000, bias=True) )```Therefore, we use the same technique to modify the output layer``` model.classifier[6] = nn.Linear(4096,num_classes)``` Squeezenet: the Squeezenet architecture is described in the paper [SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size](https://arxiv.org/abs/1602.07360) and uses a different output structure than any of the other models shown here. Torchvision has two versions of Squeezenet; we use version 1.0. The output comes from a 1x1 convolutional layer which is the 1st layer of the classifier:``` (classifier): Sequential( (0): Dropout(p=0.5) (1): Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1)) (2): ReLU(inplace) (3): AvgPool2d(kernel_size=13, stride=1, padding=0) ) ```To modify the network, we reinitialize the Conv2d layer to have an output feature map of depth 2 as``` model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))``` Densenet: Densenet was introduced in the paper [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993). Torchvision has four variants of Densenet but here we only use Densenet-121. The output layer is a linear layer with 1024 input features:``` (classifier): Linear(in_features=1024, out_features=1000, bias=True) ```To reshape the network, we reinitialize the classifier’s linear layer as``` model.classifier = nn.Linear(1024, num_classes)``` Inception v3: finally, Inception v3 was first described in [Rethinking the Inception Architecture for Computer Vision](https://arxiv.org/pdf/1512.00567v1.pdf). This network is unique because it has two output layers when training. The second output is known as an auxiliary output and is contained in the AuxLogits part of the network. The primary output is a linear layer at the end of the network. Note, when testing we only consider the primary output. The auxiliary output and primary output of the loaded model are printed as:``` (AuxLogits): InceptionAux( ... (fc): Linear(in_features=768, out_features=1000, bias=True) ) ... (fc): Linear(in_features=2048, out_features=1000, bias=True)```To finetune this model we must reshape both layers. This is accomplished with the following``` model.AuxLogits.fc = nn.Linear(768, num_classes) model.fc = nn.Linear(2048, num_classes)```Notice, many of the models have similar output structures, but each must be handled slightly differently. Also, check out the printed model architecture of the reshaped network and make sure the number of output features is the same as the number of classes in the dataset.
###Code
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
# Initialize these variables which will be set in this if statement. Each of these
# variables is model specific.
model_ft = None
input_size = 0
if model_name == "resnet":
""" Resnet18
"""
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "alexnet":
""" Alexnet
"""
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 224
elif model_name == "vgg":
""" VGG11_bn
"""
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
input_size = 224
elif model_name == "squeezenet":
""" Squeezenet
"""
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model_ft.num_classes = num_classes
input_size = 224
elif model_name == "densenet":
""" Densenet
"""
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "inception":
""" Inception v3
Be careful, expects (299,299) sized images and has auxiliary output
"""
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs,num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
return model_ft, input_size
# Initialize the model for this run
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
# Print the model we just instantiated
print(model_ft)
###Output
_____no_output_____
###Markdown
Load Data---------Now that we know what the input size must be, we can initialize the data transforms, image datasets, and the dataloaders. Notice, the models were pretrained with the hard-coded normalization values, as described [here](https://pytorch.org/docs/master/torchvision/models.html).
###Code
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
print("Initializing Datasets and Dataloaders...")
# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
Create the Optimizer--------------------Now that the model structure is correct, the final step for finetuning and feature extracting is to create an optimizer that only updates the desired parameters. Recall that after loading the pretrained model, but before reshaping, if ``feature_extract=True`` we manually set all of the parameter’s ``.requires_grad`` attributes to False. Then the reinitialized layer’s parameters have ``.requires_grad=True`` by default. So now we know that *all parameters that have .requires_grad=True should be optimized.* Next, we make a list of such parameters and input this list to the SGD algorithm constructor. To verify this, check out the printed parameters to learn. When finetuning, this list should be long and include all of the model parameters. However, when feature extracting this list should be short and only include the weights and biases of the reshaped layers.
###Code
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
###Output
_____no_output_____
###Markdown
Run Training and Validation Step--------------------------------Finally, the last step is to set up the loss for the model, then run the training and validation function for the set number of epochs. Notice, depending on the number of epochs this step may take a while on a CPU. Also, the default learning rate is not optimal for all of the models, so to achieve maximum accuracy it would be necessary to tune for each model separately.
###Code
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs, is_inception=(model_name=="inception"))
###Output
_____no_output_____
###Markdown
Comparison with Model Trained from Scratch------------------------------------------Just for fun, let's see how the model learns if we do not use transfer learning. The performance of finetuning vs. feature extracting depends largely on the dataset but in general both transfer learning methods produce favorable results in terms of training time and overall accuracy versus a model trained from scratch.
###Code
# Initialize the non-pretrained version of the model used for this run
scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
scratch_model = scratch_model.to(device)
scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
scratch_criterion = nn.CrossEntropyLoss()
_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))
# Plot the training curves of validation accuracy vs. number
# of training epochs for the transfer learning method and
# the model trained from scratch
ohist = []
shist = []
ohist = [h.cpu().numpy() for h in hist]
shist = [h.cpu().numpy() for h in scratch_hist]
plt.title("Validation Accuracy vs. Number of Training Epochs")
plt.xlabel("Training Epochs")
plt.ylabel("Validation Accuracy")
plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
plt.plot(range(1,num_epochs+1),shist,label="Scratch")
plt.ylim((0,1.))
plt.xticks(np.arange(1, num_epochs+1, 1.0))
plt.legend()
plt.show()
###Output
_____no_output_____ |
Figure_2/Figure2_B_hingemotions.ipynb | ###Markdown
First eigenvector
###Code
# visualise first mode - not what we expect given the three domain structure of the protein
showProtein(fh_chainA_ca, mode=gnm[0]);
showMode(gnm[0], hinges=True, zero=True);
gnm[0].getHinges()
###Output
_____no_output_____
###Markdown
Second Eigenvector
###Code
sns.despine()
showMode(gnm[1], hinges=True, zero=True);
# visualise second mode - much more like what we would expect from a three-domain protein
showProtein(fh_chainA_ca, mode=gnm[1]);
hingelist = gnm[1].getHinges()
gnm[1]
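# Note (added comment): the +49 offset below shifts ProDy's zero-based hinge
# indices into the PDB residue numbering, assuming the first modelled residue is 49.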
hingelist_adjusted = []
for item in hingelist:
item = item+49
hingelist_adjusted.append(item)
# get hinge residues
hingelist_adjusted
###Output
_____no_output_____
###Markdown
Custom Plotting
###Code
second_mode = gnm[1]
second_mode_eigvec = second_mode.getEigvec().round(3)
scatter_values = []
for item in hingelist:
scatter_values.append([item, second_mode_eigvec[item]])
# split the data into regions above and below 0 so that we can plot them in different colours
second_mode_eigvec_above =np.copy(second_mode_eigvec)
second_mode_eigvec_below = np.copy(second_mode_eigvec)
second_mode_eigvec_above[second_mode_eigvec_above < 0] = 0
second_mode_eigvec_above
second_mode_eigvec_below[second_mode_eigvec_below > 0] = 0
second_mode_eigvec_below
hingepoints = pd.DataFrame.from_records(scatter_values, columns= ["x", "y"])
plt.rcParams['figure.figsize'] = [10, 6]
# do the plotting
ax2= sns.scatterplot(x = hingepoints["x"], y = hingepoints["y"], s= 100, zorder = 10, color = "#C0C0C0",linewidth = 1.5, edgecolor = "k")
ax = sns.lineplot(data = second_mode_eigvec_above, lw = 2, color = "#F1A340")
ax = sns.lineplot(data = second_mode_eigvec_below, lw = 2, color = "#998EC3")
ax.set_xlim([-49,468])
ax.set_xlabel("Residue", size =14)
ax.set_ylabel("Second Eigenvector Value", size = 14)
ax.set_ylim([-0.125, 0.125])
ax.set_xticks([0,51, 151,251,351,451])
ax.set_xticklabels([49,100,200,300,400,500])
ax.axhline(0.000001, c="#949798", lw=4)
sns.despine()
plt.savefig("Figure2_B_gnmsecondmotion.png", dpi = 300)
###Output
_____no_output_____ |
examples/tutorial_xray_timeseries.ipynb | ###Markdown
XRAY Tutorial: this shows how to interact with some NetCDF time series data using XRAY. To open this in the browser type: ```ipython notebook tutorial_name.ipynb```
###Code
import xray
###Output
_____no_output_____
###Markdown
Load the data as an xray.Dataset object
###Code
ncfile = '../DATA/_for_Ed_GalvestonFluxes_Apr2009.nc'
ds = xray.open_dataset(ncfile,engine='netcdf4')
###Output
Data variables:
*empty*
###Markdown
This Dataset is empty because the data are stored in Groups. There does not appear to be a way of interactively exploring groups in xray v0.6.1
###Code
# To get the groups from a netcdf4 file use netcdf4-python (this should be in xray!)
from netCDF4 import Dataset
nc = Dataset(ncfile)
print nc
print nc.groups.keys()
group = nc.groups.keys()[0]
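# (added note: .keys()[0] above is Python 2 syntax; on Python 3 use list(nc.groups)[0])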
ds = xray.open_dataset('../DATA/_for_Ed_GalvestonFluxes_Apr2009.nc', group=group)
print ds
# Now plot the data
# This is only necessary for the notebook
%matplotlib inline
ds.Q.plot()
# Plot everything together
import matplotlib.pyplot as plt  # needed for the subplot figure below
plt.figure(figsize=(5,8))
plt.subplot(411)
ds.Q.plot()
plt.subplot(412)
ds.Uwind.plot()
plt.subplot(413)
ds.Vwind.plot()
plt.subplot(414)
ds.eta.plot()
# Metadata is also stored in the variable attributes
print ds.Q.attrs
# You can treat each variable like a numpy array to compute other quantities e.g.,...
import numpy as np
speed = np.sqrt( ds.Uwind**2 + ds.Vwind**2)
speed.plot()
###Output
_____no_output_____ |
uhecr_model/checks/random_ra/run_simulation_sims.ipynb | ###Markdown
Comparison of arrival direction and joint models. In order to verify the model is working, we fit simulations made under the assumptions of the model. We also compare the differences between a model for only the UHECR arrival directions and one for both the UHECR arrival directions and energies. *This code is used to produce the data shown in Figures 6, 7 and 8 (left panel) in Capel & Mortlock (2019).* *See the separate notebook in this directory for the actual plotting of figures.*
###Code
import numpy as np
import os
import h5py
# %matplotlib inline
from matplotlib import pyplot as plt
from pandas import DataFrame
from fancy import Data, Model, Analysis
from fancy.interfaces.stan import get_simulation_input, coord_to_uv
'''Setting up'''
# Define location of Stan files
stan_path = '../../stan/'
# Define file containing source catalogue information
source_file = '../../data/sourcedata.h5'
# make output directory if it doesnt exist
if not os.path.isdir("output"):
os.mkdir("output")
# source_types = ["SBG_23", "2FHL_250Mpc", "swift_BAT_213"]
source_types = ["SBG_23"]
# detector_types = ["auger2010", "auger2014", "TA2015"]
detector_type = "TA2015"
# detector_type = "auger2014"
# set random seed
random_seed = 19990308
# flag to control showing plots or not
show_plot = True
'''set detector and detector properties'''
if detector_type == "TA2015":
from fancy.detector.TA2015 import detector_properties, alpha_T, M, Eth
elif detector_type == "auger2014":
from fancy.detector.auger2014 import detector_properties, alpha_T, M, Eth
elif detector_type == "auger2010":
from fancy.detector.auger2010 import detector_properties, alpha_T, M, Eth
else:
raise Exception("Undefined detector type!")
'''Create joint simulated dataset'''
# Define a Stan simulation to run
sim_name = stan_path + 'joint_model_sim.stan' # simulate all processes
# Define simulation using Model object and compile Stan code if necessary
simulation = Model(sim_filename = sim_name, include_paths = stan_path)
simulation.compile(reset=False)
for source_type in source_types:
print("Current Source: {0}".format(source_type))
# define separate files
    table_file = '../../tables/tables_{0}_{1}.h5'.format(source_type, detector_type)
sim_output_file = 'output/joint_model_simulation_{0}_{1}.h5'.format(source_type, detector_type)
# Define a source catalogue and detector exposure
# In the paper we use the SBG catalogue
data = Data()
data.add_source(source_file, source_type)
data.add_detector(detector_properties)
# Plot the sources in Galactic coordinates
# if show_plot:
# data.show();
# Define associated fraction
f = 0.5
# Simulation input
B = 20 # nG
alpha = 3.0
Eth = Eth
Eth_sim = 20 # EeV
# number of simulated inputs
# changes the background flux linearly
# should choose Nsim such that FT is the same for
# each observatory
# this ensures that L, F0 are the same
#
    # for PAO, we saw that FT = 0.3601
    FT_PAO = 0.3601  # total flux for PAO with Nsim = 2500
Nsim_expected = FT_PAO / (M / alpha_T)
Nsim = int(np.round(Nsim_expected))
# check value for Nsim
print("Simulated events: {0}".format(Nsim))
# L in yr^-1, F in km^-2 yr^-1
L, F0 = get_simulation_input(Nsim, f, data.source.distance, M, alpha_T)
# To scale between definition of flux in simulations and fits
flux_scale = (Eth / Eth_sim)**(1 - alpha)
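    # (added note: this rescales an integral power-law flux defined above the
    # simulation threshold Eth_sim = 20 EeV to the fit threshold Eth, since
    # N(>E) scales as E**(1 - alpha))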
simulation.input(B = B, L = L, F0 = F0,
alpha = alpha, Eth = Eth)
# check luminosity and isotropic flux values
# L ~ O(10^39), F0 ~ 0.18
# same luminosity so only need to check one value
print("Simulated Luminosity: {0:.3e}".format(L[0]))
print("Simulated isotropic flux: {0:.3f}".format(F0))
# What is happening
summary = b'Simulation using the joint model and SBG catalogue' # must be a byte str
# summary = b'Simulation using the joint model and Swift-BAT catalogue' # must be a byte str
# Define an Analysis object to bring together Data and Model objects
sim_analysis = Analysis(data, simulation, analysis_type = 'joint',
filename = sim_output_file, summary = summary)
print("Building tables...")
# Build pre-computed values for the simulation as you go
# So that you can try out different parameters
sim_analysis.build_tables(sim_only = True)
print("Running simulation...")
# Run simulation
sim_analysis.simulate(seed = random_seed, Eth_sim = Eth_sim)
# Save to file
sim_analysis.save()
# print resulting UHECR observed after propagation and Elosses
print("Observed simulated UHECRs: {0}\n".format(len(sim_analysis.source_labels)))
# print plots if flag is set to true
if show_plot:
sim_analysis.plot("arrival_direction")
# check initial locations of UHECR since analysis.plot somehow doesn't show this plot
from fancy.plotting.allskymap import AllSkyMap
allskymap = AllSkyMap(projection="hammer", lat_0=0., lon_0=0.)
fig, ax = plt.subplots(figsize=(12,9))
sim_analysis.data.uhecr.plot(allskymap)
'''Fit using arrival direction model'''
from astropy.coordinates import SkyCoord
uhecr_coord_rands = []
for source_type in source_types:
print("Current Source: {0}".format(source_type))
# define separate files
table_file = '../../tables/tables_{0}_{1}.h5'.format(source_type, detector_type)
sim_output_file = 'output/joint_model_simulation_{0}_{1}.h5'.format(source_type, detector_type)
arrival_output_file = 'output/arrival_direction_fit_{0}_{1}.h5'.format(source_type, detector_type)
# Define data from simulation
data = Data()
data.from_file(sim_output_file)
# modify the right ascensions of uhecr data
uhecr_coord = data.uhecr.coord
uhecr_coord.representation_type = 'spherical'
uhecr_ra = uhecr_coord.ra.deg
uhecr_dec = uhecr_coord.dec.deg
# randomize the right ascension only
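    # (keeping the declination fixed preserves the detector exposure, which
    #  depends mainly on declination, while scrambling any correlation between
    #  the arrival directions and the source positions)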
uhecr_ra_rand = (uhecr_ra + np.random.rand(len(uhecr_ra)) * 360.) % 360.
uhecr_coord_rand = SkyCoord(ra=uhecr_ra_rand, dec=uhecr_dec, frame="icrs", unit="deg")
uhecr_coord_rand.representation_type = "cartesian"
# append to list for usage with joint model
uhecr_coord_rands.append(uhecr_coord_rand)
# also obtain unit vector
data.uhecr.coord = uhecr_coord_rand
data.uhecr.unit_vector = coord_to_uv(data.uhecr.coord)
if show_plot:
data.show()
# Arrival direction model
model_name = stan_path + 'arrival_direction_model.stan'
# Compile
model = Model(model_filename = model_name, include_paths = stan_path)
model.compile(reset=False)
# Define threshold energy in EeV
model.input(Eth = Eth)
# What is happening
summary = b'Fit of the arrival direction model to the joint simulation'
# Define an Analysis object to bring together Data and Model objects
analysis = Analysis(data, model, analysis_type = 'joint',
filename = arrival_output_file, summary = summary)
# Define location of pre-computed values used in fits
# (see relevant notebook for how to make these files)
# Each catalogue has a file of pre-computed values
analysis.use_tables(table_file)
# Fit the Stan model
fit = analysis.fit_model(chains = 16, iterations = 500, seed = random_seed)
# Save to analysis file
analysis.save()
'''Fit using joint model'''
for i, source_type in enumerate(source_types):
print("Current Source: {0}".format(source_type))
# define separate files
table_file = '../../tables/tables_{0}_{1}.h5'.format(source_type, detector_type)
sim_output_file = 'output/joint_model_simulation_{0}_{1}.h5'.format(source_type, detector_type)
joint_output_file = 'output/joint_fit_{0}_{1}.h5'.format(source_type, detector_type)
# Define data from simulation
data = Data()
data.from_file(sim_output_file)
data.uhecr.coord = uhecr_coord_rands[i]
data.uhecr.unit_vector = coord_to_uv(data.uhecr.coord)
# create Model and compile
model_name = stan_path + 'joint_model.stan'
model = Model(model_filename = model_name, include_paths = stan_path)
model.compile(reset=False)
model.input(Eth = Eth)
# create Analysis object
summary = b'Fit of the joint model to the joint simulation'
analysis = Analysis(data, model, analysis_type = 'joint',
filename = joint_output_file, summary = summary)
analysis.use_tables(table_file)
# Fit the Stan model
fit = analysis.fit_model(chains = 16, iterations = 500, seed = random_seed)
# Save to analysis file
analysis.save()
###Output
Current Source: SBG_23
Using cached StanModel
|
PMK_Team1/QCHack_IBM_RNG.ipynb | ###Markdown
This project implements a random number generator on any number of qubits, using the effect of quantum interference. The basic algorithm is: initiate a quantum circuit of n qubits, apply an oracle to each qubit, then measure all qubits and print the resulting bit pattern after running the circuit on the qasm simulator. The oracle does the following: it generates a classical random bit, 0 or 1. If the bit is 0, it applies two Hadamard gates in succession to the input qubit (which is in the |0> state initially), so by constructive and destructive interference the qubit returns to its initial state. If the bit is 1, it applies an X gate (i.e. a bit-flip gate) followed by two Hadamard gates in succession, so the two Hadamards cancel out and we are left with the flipped qubit state. The challenge faced was how to use the interference created by Hadamard gates to generate a bit pattern, which led to the algorithm above. The implications of this project are that it could be extended to any number of qubits to generate a random bit pattern, which, as per https://www.jrussellhuffman.com/joyvazirani/, could be used to create beautiful visualizations; it could also be used in the wireless domain to test an entire transmitter and receiver encoding and decoding chain under AWGN noise and channel effects, by generating random input data for a given transport block size.
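A quick sanity check of the interference identity the oracle relies on (a sketch in plain numpy, independent of Qiskit):
###Code
import numpy as np
# single-qubit Hadamard matrix; applied twice it gives the identity,
# so H,H maps |0> back to |0>, and X followed by H,H leaves the qubit flipped to |1>
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
print(np.allclose(H @ H, np.eye(2)))
###Output
True
###Markdown
The oracle and circuit construction for the generator follow.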
###Code
from random import getrandbits
def random_interf_oracle(qbit_idx,qc):
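    # flip a fair classical coin: on 0, apply H twice (H.H = I, so interference
    # restores the initial |0> state); on 1, flip to |1> with X first, then
    # apply the same cancelling H, H pair so the flip survives measurement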
if (getrandbits(1)==0):
qc.h(qbit_idx)
qc.h(qbit_idx)
qc.barrier()
else:
qc.x(qbit_idx)
qc.barrier()
qc.h(qbit_idx)
qc.h(qbit_idx)
qc.barrier()
from qiskit import *
from qiskit import Aer
# Create a quantum circuit acting on n qubits
n = 4  # number of qubits (assumed value; n was not defined in this excerpt)
circ = QuantumCircuit(n,n)
for i in range(n):
random_interf_oracle(i,circ)
#measurement
circ.measure(i,i)
# Import Aer and execute
from qiskit import Aer, execute
backend_sim = Aer.get_backend('qasm_simulator')
sim = execute(circ, backend_sim, shots=1000)
sim_result = sim.result()
counts = sim_result.get_counts(circ)
print('==================================')
print('\nBelow is the output of executing the circuit 1000 times.')
# We can plot the outcomes stored in the variable 'counts' using the following code:
from qiskit.visualization import plot_histogram
plot_histogram(counts)
###Output
==================================
Below is the output of executing the circuit 1000 times.
|
notebooks/Statsbomb_Data_Download.ipynb | ###Markdown
Downloading Data and Brief Introduction to Some VisualizationsIn this example, we will get World Cup data. From the statsbomb [Competition List](https://github.com/statsbomb/open-data/blob/master/data/competitions.json) we see that the World Cup data is given competition_id=43 Import Packages
###Code
%load_ext autoreload
%autoreload 2
import os, sys
# We have to append the parent folder because we are running this from inside notebook
sys.path.append("../")
import pystatsbomb as sb
from pystatsbomb.plotting import plotpitch, plotpitch_bokeh
from pystatsbomb.plotting import pass_rose, getArrow
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from bokeh.models import Arrow, OpenHead, Label
from bokeh.io import show, output_notebook
pd.options.mode.chained_assignment = None # default='warn'
%matplotlib inline
output_notebook()
###Output
_____no_output_____
###Markdown
Start the client
###Code
c = sb.Client(local_source_dir=None)
c.get_competitions()
# Check to see what competitions are returned
print([{comp["competition_name"]: comp["competition_id"]} for comp in c.competitions])
###Output
[{"FA Women's Super League": 37}, {'FIFA World Cup': 43}, {'NWSL': 49}]
###Markdown
Download all the World Cup DataThis will store the data inside the class as:* df_matches* df_lineups* df_events
###Code
comp_id = 43 # FIFA World Cup
match_ids = [7585] # Columbia - England
deep_lineup_parse = True
c.get_all_sb_data(
comp_id=comp_id,
match_ids=match_ids,
deep_lineup_parse=deep_lineup_parse,
toPandas=True)
df_matches = c.df_matches.set_index('match_id')
df_lineups = c.df_lineups
df_events = c.df_events
###Output
_____no_output_____
###Markdown
Take a Look at One MatchGroup Stage Round 1: England v Columbia
###Code
match_id = 7585
match_lineups = df_lineups.loc[df_lineups["match_id"] == match_id]
match_events = df_events.loc[df_events["match_id"] == match_id]
match_lineups.iloc[1:-1:5, :]
print("Number of Match Events: {}".format(len(match_events)))
###Output
Number of Match Events: 3168
###Markdown
Load Data From Local FolderIf you have already downloaded the data from statsbomb, point your client to the folder with the files and run the client. This time we will add the tag "deep_lineup_parse" which will add tactical changes, starter data, positions, substitutions, and minutes played to the list of dataframes generated
###Code
c = sb.Client(local_source_dir="../../open-data/data")
c.get_competitions()
# Check to see what competitions are returned
print([{comp["competition_name"]: comp["competition_id"]} for comp in c.competitions])
comp_id = 43 # FIFA World Cup
match_ids = [7585] # Columbia - England
deep_lineup_parse = True
c.get_all_sb_data(
comp_id=comp_id,
match_ids=match_ids,
deep_lineup_parse=deep_lineup_parse,
toPandas=True)
df_deep_lineups = c.df_deep_lineup
df_play_time = c.df_play_time
df_positions = c.df_positions
df_play_time.groupby(["team_name", "player.name", "match_id"], as_index=False)\
.sum().sort_values("total_minutes_played", ascending=False).iloc[1:-1:5, :]
###Output
_____no_output_____
###Markdown
Brief Look at Passes
###Code
df_matches = c.df_matches.set_index('match_id')
df_lineups = c.df_lineups
df_events = c.df_events
pass_values = [
'index', 'duration', 'id', 'period','minute','second', 'player_name',
'position_name',
'possession_team_name', 'possession', 'possession_team_id',
'related_events', 'under_pressure', 'location',
# Pass details
'pass_aerial_won', 'pass_angle', 'pass_assisted_shot_id', 'pass_backheel',
'pass_body_part_id', 'pass_body_part_name', 'pass_cross', 'pass_deflected',
'pass_end_location', 'pass_goal_assist', 'pass_height_id', 'pass_height_name',
'pass_length', 'pass_outcome_id', 'pass_outcome_name', 'pass_recipient_id',
'pass_recipient_name', 'pass_shot_assist', 'pass_switch', 'pass_through_ball',
'pass_type_id', 'pass_type_name']
df_passes = df_events.loc[df_events['type_name'].isin(['Pass']), pass_values].set_index("index")
df_passes['pass_outcome_name'].fillna('Complete', inplace=True)
df_passes.shape
pd.crosstab(df_passes["possession_team_name"], df_passes['pass_outcome_name']).sort_values("Complete", ascending=False).head()
###Output
_____no_output_____
###Markdown
Pass Angle
###Code
def pass_direction(x):
"""According to statsbomb, pass_angle is between 0 (pass ahead) and pi (pass behind). Clockwise
We divide the circle into 4 equal sections. Directions are forward, right, left, behind"""
pi_div = np.pi / 4
if (x <= pi_div) & (x >= -pi_div):
return "Forward"
elif (x > pi_div) & (x <= 3 * pi_div):
return "Right"
    elif (x > 3 * pi_div) | (x < -3 * pi_div):  # 'or', not 'and': no angle satisfies both, so the original condition was never true
return "Behind"
else:
return "Left"
def pass_angle_deg(x):
"""Convert negative angles to positive radians from 0 to 2pi clockwise"""
if x >= 0:
return x * 180. / np.pi
else:
return (2 * np.pi + x) * 180. / np.pi
df_passes['pass_direction'] = df_passes['pass_angle'].apply(pass_direction)
df_passes['pass_angle_deg'] = df_passes['pass_angle'].apply(pass_angle_deg)
sns.pairplot(df_passes[['pass_length', 'pass_angle', 'pass_outcome_name']], hue='pass_outcome_name')
###Output
C:\Program Files\Anaconda3\envs\statsbomb\lib\site-packages\scipy\stats\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
###Markdown
Pass Rose PlotThis is the chart of pass length and frequency for all passes in the dataset
###Code
pass_rose(df_passes)
###Output
1161 total observations
24
24
###Markdown
Base Map
###Code
plotpitch()
plotpitch_bokeh(display=True)
###Output
_____no_output_____
###Markdown
Possession
###Code
def getPossession(df):
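    # duration of each possession spell = time of its last event minus time of its
    # first event; summing these per team and dividing by 60 gives each side's
    # minutes of possession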
df_pos = df.loc[:, ("possession_team_name", "possession", "period", "minute", "second")]
df_pos["time"] = 60 * df_pos["minute"] + df_pos["second"]
df_pos = df_pos.loc[df_pos["time"] > 0 ]
df_pos_grp = df_pos.groupby(["possession_team_name", "possession"]).agg(
{"time": ["min", "max"]})
df_pos_grp["time_of_pos"] = df_pos_grp["time"]["max"] - df_pos_grp["time"]["min"]
df_pos_grp = df_pos_grp.groupby("possession_team_name").sum() / 60
del df_pos_grp["time"]
return df_pos_grp
getPossession(df_events)
###Output
_____no_output_____
###Markdown
Shots
###Code
shot_values = [
'index', 'duration', 'id', 'period','minute','second', 'player_name',
'position_name', 'possession_team_name', 'possession', 'possession_team_id',
'related_events', 'under_pressure', 'location',
# Shot details
'shot_body_part_id', 'shot_body_part_name', 'shot_end_location',
'shot_first_time', 'shot_follows_dribble', 'shot_freeze_frame',
'shot_key_pass_id', 'shot_outcome_id', 'shot_outcome_name',
'shot_statsbomb_xg', 'shot_technique_id', 'shot_technique_name',
'shot_type_id', 'shot_type_name',
]
df_shots = df_events.loc[df_events['type_name'].isin(['Shot']), shot_values]
df_shots['shot_outcome_name'].value_counts()
###Output
_____no_output_____
###Markdown
First 10 shots in matplotlib
###Code
fig, ax = plotpitch(display=False)
for line in df_shots[['location', 'shot_end_location', 'possession_team_name']].head(10).iterrows():
x,y,dx,dy,c = getArrow(line[1][0], line[1][1], line[1][2], 'England', viz="mpl")
ax.arrow(x, y, dx, dy, head_width=1, head_length=1, fc=c, ec=c)
ax.annotate(line[0], xy=(x, y), xytext=(x+1, y+1))
plt.show()
###Output
_____no_output_____
###Markdown
First 10 shots in bokeh
###Code
plot = plotpitch_bokeh(display=False)
for line in df_shots[['location', 'shot_end_location', 'possession_team_name']].head(10).iterrows():
x,y,dx,dy,c = getArrow(line[1][0], line[1][1][0:2], line[1][2], 'England', viz="bokeh")
plot.add_layout(Arrow(end=OpenHead(size=10, line_color=c),
line_color=c,
x_start=x, y_start=y, x_end=dx, y_end=dy))
plot.add_layout(Label(x=x, y=y, text_font_size="10pt", text=str(line[0])))
show(plot)
###Output
_____no_output_____ |
v1.52.2/Tests/7.1 Question groups variation analysis tests.ipynb | ###Markdown
7.1 Question groups variation analysis tests Table of contents[H2b Specific scientific questions](H2b-Specific-scientific-questions) [a\)](a%29) [b\)](b%29) [c\)](c%29) [d\)](d%29) [e\)](e%29)
###Code
%run "../Functions/7. Question groups variation analysis.ipynb"
debugging = False
if debugging:
[x for x in demographicQuestions if not x in numericDemographicQuestions]
for entryIndex in gfdf.index:
try:
for x in getNumeric(gfdf.loc[entryIndex], _source=correctAnswers + demographicAnswers):
if type(x) != float:
print(entryIndex)
break
except Exception:
print(entryIndex)
#getNumeric(gfdf.iloc[0], _source=correctAnswers + demographicAnswers).apply(lambda x: print(type(x)))
if debugging:
for x in getNumeric(gfdf.loc['228'], _source=correctAnswers + demographicAnswers):
if type(x) != float:
print(type(x))
gfdf.loc['208']
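# express the raw answer counts below as percentages (each count is out of 125)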
[x*100/125 for x in [42, 30, 34, 33, 26, 36, 65, 56]]
# x is extreme opinion, y is moderate opinion
def ratioExtreme(x,y):
return (x * 100)/(x + y)
#Bio
ratioExtreme(8,14),ratioExtreme(10,20),ratioExtreme(10,44),ratioExtreme(12,41)
#SB
ratioExtreme(14,19),ratioExtreme(16,22),ratioExtreme(8,33),ratioExtreme(10,36)
#VG
ratioExtreme(5,19),ratioExtreme(8,20),ratioExtreme(42,42),ratioExtreme(46,30)
#engineering
ratioExtreme(9,17),ratioExtreme(13,23),ratioExtreme(29,36),ratioExtreme(27,29)
scientificQuestions
gfdf = gfdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
rmdf = rmdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
allData = allDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
saveFiles = False
baseGradingBase = (1,0,0,0)
lenientGradingBase = (3,2,1,0)
categoryLenientGradingBase = (2,2,1,0)
# This strict grading policy penalizes errors, and gives more points for "I don't know" answers than to errors.
strictGradingBase = (3,1,2,0)
baseMisconceptionCoding = {
0: "incorrect",
1: "correct",
}
lenientMisconceptionCoding = {
0: "severe misconception",
1: "doesn't know",
2: "mild misconception",
3: "correct",
}
categoryLenientMisconceptionCoding = {
0: "misconception",
1: "doesn't know",
2: "correct category"
}
strictMisconceptionCoding = {
0: "severe misconception",
1: "mild misconception",
2: "doesn't know",
3: "correct",
}
###Output
_____no_output_____
###Markdown
Per question analysis Interest variation
###Code
#QInterestBiology,\
QCuriosityBiology,\
QCuriositySyntheticBiology,\
QCuriosityEngineering,\
QCuriosityVideoGames,\
#QEnjoyed
curiosityQuestions = [
#QInterestBiology,\
QCuriosityBiology,\
QCuriositySyntheticBiology,\
QCuriosityEngineering,\
QCuriosityVideoGames,\
#QEnjoyed,\
]
questions = [deltaPrefix + " " + q for q in curiosityQuestions]
questions
plotPretestPosttestDeltaGfdf(allData, curiosityQuestions)
for q in curiosityQuestions:
analyseQuestion(allDataPlaytestPhase1PretestPosttestUniqueProfiles, q)
###Output
_____no_output_____
###Markdown
1-5 scale analysis
###Code
# from dataFormating.ipynb
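# maps the English and French questionnaire answers onto a common 1-5 scale;
# 'I don't know' / 'Je ne sais pas' is coded as the neutral value 3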
QCuriosityCoding = {"A lot": 4, "Beaucoup": 4, "Enormément": 5, "Énormément": 5, "Extremely": 5, "Moderately": 3, "Moyennement": 3, "Slightly": 2, "Un peu": 2, "I don't know": 3, "Je ne sais pas": 3, "Not at all": 1, "Pas du tout": 1}
QCuriosityBiologyCoding = QCuriosityCoding
QCuriositySyntheticBiologyCoding = QCuriosityCoding
QCuriosityEngineeringCoding = QCuriosityCoding
QCuriosityVideoGamesCoding = QCuriosityCoding
#biologyInterestCoding = {"A lot": 4, "Beaucoup": 4, "Enormément": 5, "Énormément": 5, "Extremely": 5, "Moderately": 3, "Moyennement": 3, "Slightly": 2, "Un peu": 2, "I don't know": 3, "Je ne sais pas": 3, "Not at all": 1, "Pas du tout": 1}
#QEnjoyedCoding = {'Extremely': 4, 'A lot': 3, 'Not at all': 0, 'A bit': 1, 'Moderately': 2, "No": 0, "Not applicable: not played yet": -1}
curiosityQuestionsCoding = [QCuriosityBiologyCoding, QCuriositySyntheticBiologyCoding, QCuriosityEngineeringCoding, QCuriosityVideoGamesCoding]
gfdfResult = compareUsingCustomCorrection(
gfdfPlaytestPhase1PretestPosttestUniqueProfiles,
curiosityQuestions,
curiosityQuestionsCoding,
)
plotPretestPosttestDeltaGfdf(
gfdfResult,
curiosityQuestions,
plotGraphs = True,
printData = True,
saveFiles = saveFiles,
suffix = ' 1-5 score variation'
)
###Output
_____no_output_____
###Markdown
H2b Specific scientific questions a)
###Code
h2baQuestions = [QGenotypePhenotype]
plotPretestPosttestDeltaGfdf(allData, h2baQuestions, qualitativeCoding = baseMisconceptionCoding)
graphTitleStem = "genotype-phenotype question - "
analyseQuestionGroupParameters = [
(getQGenotypePhenotypeGrading(*baseGradingBase), graphTitleStem + "base grading", baseMisconceptionCoding),
(getQGenotypePhenotypeGrading(*lenientGradingBase), graphTitleStem + "lenient grading", lenientMisconceptionCoding),
(getQGenotypePhenotypeGrading(*strictGradingBase), graphTitleStem + "strict grading", strictMisconceptionCoding),
]
for grading, title, coding in analyseQuestionGroupParameters:
analyseQuestionGroup(
gfdf,
h2baQuestions,
grading,
title=title,
plotGraphs = True,
printData = True,
saveFiles = saveFiles,
qualitativeCoding = coding,
)
###Output
_____no_output_____
###Markdown
c)
###Code
h2bcQuestions = [
#'What does this device do? RBS:PCONS:FLHDC:TER XXX', #32
QDeviceRbsPconsFlhdcTer,
#'What does this device do? PCONS:RBS:FLHDC:TER', #33
QDevicePconsRbsFlhdcTer,
#'What does this device do? PBAD:RBS:GFP:TER', #34
QDevicePbadRbsGfpTer,
#'What does this device do? PBAD:GFP:RBS:TER XXX', #35
QDevicePbadGfpRbsTer,
#'What does this device do? GFP:RBS:PCONS:TER XXX', #36
QDeviceGfpRbsPconsTer,
#'What does this device do? PCONS:GFP:RBS:TER XXX', #37
QDevicePconsGfpRbsTer,
#'What does this device do? AMPR:RBS:PCONS:TER XXX', #38
QDeviceAmprRbsPconsTer,
#'What does this device do? RBS:PCONS:AMPR:TER XXX', #39
QDeviceRbsPconsAmprTer,
]
###Output
_____no_output_____
###Markdown
biobrick grammar analysis
###Code
graphTitleStem = "device questions - "
analyseQuestionGroupParameters = [
(getDeviceQuestionsGrading(*baseGradingBase), graphTitleStem + "base grading"),
(getDeviceQuestionsGrading(*categoryLenientGradingBase), graphTitleStem + "category lenient grading"),
(getDeviceQuestionsGrading(*lenientGradingBase), graphTitleStem + "lenient grading"),
(getDeviceQuestionsGrading(*strictGradingBase), graphTitleStem + "strict grading"),
]
for grading, title in analyseQuestionGroupParameters:
analyseQuestionGroup(
gfdf,
h2bcQuestions,
grading,
title=title,
plotGraphs = True, printData = True, saveFiles = saveFiles
)
###Output
_____no_output_____
###Markdown
d)
###Code
h2bdQuestions = [
#'Represents the end of a device... TER', #20
QBBFunctionTER,
#'Represents the ability given... CDS', #22
QBBFunctionGameCDS,
#'Codes a protein... CDS', #24
QBBFunctionBiologyCDS,
#'Controls when the device is active... PR', #28
QBBFunctionPR,
#'Controls the level of expression, and thus how much the ability will be affected... RBS', #29
QBBFunctionRBS,
]
graphTitleStem = "biobrick function questions - "
analyseQuestionGroupParameters = [
(getBioBrickFunctionsQuestionsGrading(*baseGradingBase), graphTitleStem + "base grading"),
(getBioBrickFunctionsQuestionsGrading(*categoryLenientGradingBase), graphTitleStem + "category lenient grading"),
(getBioBrickFunctionsQuestionsGrading(*lenientGradingBase), graphTitleStem + "lenient grading"),
(getBioBrickFunctionsQuestionsGrading(*strictGradingBase), graphTitleStem + "strict grading"),
]
for grading, title in analyseQuestionGroupParameters:
analyseQuestionGroup(
gfdf,
h2bdQuestions,
grading,
title=title,
plotGraphs = True, printData = True, saveFiles = saveFiles
)
###Output
_____no_output_____
###Markdown
e)
###Code
h2beQuestions = [
#'What does this device do? RBS:PCONS:FLHDC:TER XXX', #32
QDeviceRbsPconsFlhdcTer,
#'What does this device do? PCONS:RBS:FLHDC:TER', #33
QDevicePconsRbsFlhdcTer,
#'What does this device do? GFP:RBS:PCONS:TER XXX', #36
QDeviceGfpRbsPconsTer,
#'What does this device do? PCONS:GFP:RBS:TER XXX', #37
QDevicePconsGfpRbsTer,
#'What does this device do? AMPR:RBS:PCONS:TER XXX', #38
QDeviceAmprRbsPconsTer,
#'What does this device do? RBS:PCONS:AMPR:TER XXX', #39
QDeviceRbsPconsAmprTer,
#'What does this device do? PBAD:RBS:GFP:TER', #34
QDevicePbadRbsGfpTer,
#'What does this device do? PBAD:GFP:RBS:TER XXX', #35
QDevicePbadGfpRbsTer,
#'What does this device do? GFP:RBS:PCONS:TER XXX', #36
#'Last question. Next page only contains remarks.Guess: you have crafted a functional device containing an arabinose-induced promoter and an arabinose Coding Sequence (CDS). What will happen?', #42
QDevicePbadRbsAraTer,
]
h2beStricterQuestions = [
#'What does this device do? PBAD:RBS:GFP:TER', #34
QDevicePbadRbsGfpTer,
#'What does this device do? PBAD:GFP:RBS:TER XXX', #35
QDevicePbadGfpRbsTer,
#'Last question. Next page only contains remarks.Guess: you have crafted a functional device containing an arabinose-induced promoter and an arabinose Coding Sequence (CDS). What will happen?', #42
QDevicePbadRbsAraTer,
]
h2beStrictestQuestions = [
#'Last question. Next page only contains remarks.Guess: you have crafted a functional device containing an arabinose-induced promoter and an arabinose Coding Sequence (CDS). What will happen?', #42
QDevicePbadRbsAraTer,
]
graphTitleStem1 = "broad-sense induction questions - "
graphTitleStem2 = "stricter-sense induction questions - "
graphTitleStem3 = "strictest-sense induction questions - "
analyseQuestionGroupParameters = [
(h2beQuestions,
getInductionQuestionsGrading(*baseGradingBase),
graphTitleStem1 + "base grading",
None,
),
(h2beQuestions,
getInductionQuestionsGrading(*lenientGradingBase),
graphTitleStem1 + "lenient grading",
None,
),
(h2beQuestions,
getInductionQuestionsGrading(*strictGradingBase),
graphTitleStem1 + "strict grading" ,
None,
),
]
analyseQuestionGroupParameters += [
(h2beStricterQuestions,
getQuestionsGradingSubset(h2beQuestions, h2beStricterQuestions, analyseQuestionGroupParameters[0][1]),
graphTitleStem2 + "base grading",
None,
),
(h2beStricterQuestions,
getQuestionsGradingSubset(h2beQuestions, h2beStricterQuestions, analyseQuestionGroupParameters[1][1]),
graphTitleStem2 + "lenient grading",
None,
),
(h2beStricterQuestions,
getQuestionsGradingSubset(h2beQuestions, h2beStricterQuestions, analyseQuestionGroupParameters[2][1]),
graphTitleStem2 + "strict grading",
None,
),
(h2beStrictestQuestions,
getQuestionsGradingSubset(h2beQuestions, h2beStrictestQuestions, analyseQuestionGroupParameters[0][1]),
graphTitleStem3 + "base grading",
baseMisconceptionCoding,
),
(h2beStrictestQuestions,
getQuestionsGradingSubset(h2beQuestions, h2beStrictestQuestions, analyseQuestionGroupParameters[1][1]),
graphTitleStem3 + "lenient grading",
lenientMisconceptionCoding,
),
(h2beStrictestQuestions,
getQuestionsGradingSubset(h2beQuestions, h2beStrictestQuestions, analyseQuestionGroupParameters[2][1]),
graphTitleStem3 + "strict grading",
strictMisconceptionCoding,
),
]
for questions, grading, title, coding in analyseQuestionGroupParameters:
analyseQuestionGroup(
gfdf,
questions,
grading,
title=title,
plotGraphs = True,
printData = True,
saveFiles = saveFiles,
qualitativeCoding = coding,
)
###Output
_____no_output_____ |
docs/python/algorithms/Decision_Tree_Classifier.ipynb | ###Markdown
---title: "Decision_Tree_Classifier"author: "Aavinash"date: 2020-09-04description: "-"type: technical_notedraft: false---
###Code
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:,[2,3]]
Y = iris.target
print(X[1:5,:])
print(Y)
## 1. Splitting the dataset
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.3,random_state=0)
length_Train = len(X_train)
length_Test = len(X_test)
print("There are ",length_Train,"samples in the trainig set and",length_Test,"samples in the test set")
print("-----------------------------------------------------------------------------------------------")
print("")
## 2. Feature scaling.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_standard = sc.transform(X_train)
X_test_standard = sc.transform(X_test)
print("X_train without standardising features")
print("--------------------------------------")
print(X_train[1:5,:])
print("")
print("X_train standardising features")
print("--------------------------------------")
print(X_train_standard[1:5,:])
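# Note: decision trees split on feature thresholds and are invariant to monotonic
# feature scaling, so the unscaled X_train is used to fit the tree below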
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(criterion='entropy'
, max_depth = 3
, random_state = 0)
tree.fit(X_train,Y_train)
Y_pred_tree = tree.predict(X_test)
print("Accuracy: %.2f" % accuracy_score(Y_test,Y_pred_tree))
###Output
Accuracy: 0.98
|
nbs/elmed219-2020-getting-started.ipynb | ###Markdown
Getting started on the ELMED219-2020 Challenge> Alexander S. Lundervold, 30.12.19 Under construction! Check back later for more content. In this kernel we'll get you started by1. **Loading the data**. We'll use Pandas to load the training and test data2. **Investigating a few features**. We'll have a look at a few features in the data through simple statistics and plotting.3. **Explain how to submit predictions**. After you've created a model and produced predictions on the test set, you have to gather the predictions in a file that can be submitted to Kaggle for scoring. We'll show you how. _If you want to use this notebook as a starting point for your own work, you can just hit "Fork" to get started._ Another option is to use Colab: https://colab.research.google.com. To do that, (i) download the notebook to your own computer, (ii) go to https://colab.research.google.com, log in, and upload the notebook, (iii) share it with your team members. We recommend the Colab option. Setup You'll get very far just using the standard Python libraries for data science, imported here:
###Code
%matplotlib inline
import numpy as np, pandas as pd, seaborn as sns, matplotlib.pyplot as plt
from pathlib import Path
###Output
_____no_output_____
###Markdown
Load the data Here are three options: 1. For working locally2. Working in a Kaggle kernel3. Using Colab If you're working locally You can download the data via the Data tab on the Kaggle competition home page. If you're using Kaggle: The data is stored in the `input` folder:
###Code
DATA = Path('../input/elmed219-2020')
list(DATA.iterdir())
###Output
_____no_output_____
###Markdown
There are five files: the training data, the test data, a sample submission file, a file containing Freesurfer outputs for subjects scanned multiple times, and a lookup table for Freesurfer (useful when plotting the Freesurfer outputs to better understand what the different features are).
###Code
import pickle
train = pd.read_csv(DATA/'train.csv')
test = pd.read_csv(DATA/'test.csv')
sampleSubmission = pd.read_csv(DATA/'sampleSubmission.csv')
slim_longitudinal = pd.read_csv(DATA/'SLIM_longitudinal_info.csv')
with open(DATA/'FreesurferLUT.pkl', 'rb') as f:
FS_LUT = pickle.load(f)
###Output
_____no_output_____
###Markdown
If you're using Colab: Uncomment and run the following cell instead of the two above cells:
###Code
#train = pd.read_csv('https://github.com/MMIV-ML/ELMED219-2020/raw/master/KaggleInClass/train.csv')
#test = pd.read_csv('https://github.com/MMIV-ML/ELMED219-2020/raw/master/KaggleInClass/test.csv')
#sampleSubmission = pd.read_csv('https://raw.githubusercontent.com/MMIV-ML/ELMED219-2020/master/KaggleInClass/sampleSubmission.csv')
#slim_longitudinal = pd.read_csv('https://raw.githubusercontent.com/MMIV-ML/ELMED219-2020/master/KaggleInClass/SLIM_longitudinal_info.csv')
#import urllib.request
#with urllib.request.urlopen('https://github.com/MMIV-ML/ELMED219-2020/raw/master/KaggleInClass/FreesurferLUT.pkl') as f:  # open() cannot read URLs
#    FS_LUT = pickle.load(f)
###Output
_____no_output_____
###Markdown
Take a look at the data
###Code
train.info()
test.info()
train.head()
test.head()
###Output
_____no_output_____
###Markdown
In the training data we have our target feature `Age` at the end, while it's missing in the test data. We want to make a model that can predict `Age` from the other features in the test data. Additional data
###Code
FS_LUT
###Output
_____no_output_____
###Markdown
The longitudinal file with data from SLIM contains 18 subjects scanned three times each. More on this later.
###Code
slim_longitudinal.head()
###Output
_____no_output_____
###Markdown
Basic exploration Here are a few basic plots to get you started:
###Code
train.Age.hist()
plt.show()
train.FieldStrength.value_counts().plot(kind='bar')
plt.show()
counts = list(train.FieldStrength.value_counts().values) + [train.FieldStrength.isna().sum(),]
labels = ["3T", "1.5T", "Unknown"]
fig, ax = plt.subplots(1, 1, figsize=(12,6))
plt.pie(counts, explode=[0.1, 0.1, 0.1], labels=labels, startangle=90, autopct='%1.1f%%', shadow=True)
plt.show()
fig = sns.boxplot(x='Sex', y='EstimatedTotalIntraCranialVol', data=train)
###Output
_____no_output_____
###Markdown
Feature selection and feature engineering We leave this to you... (one possible starting point is sketched below).
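A minimal sketch, not part of the original notebook: it assumes scikit-learn is available and that the numeric Freesurfer features contain no missing values; `SelectKBest` with an F-test is just one of many ways to rank features by their association with age.
###Code
from sklearn.feature_selection import SelectKBest, f_regression
# rank the numerical Freesurfer features by their univariate association with age
X_num = train.drop(columns=['SubjectID', 'Source', 'ID', 'Sex', 'Height', 'Weight',
                            'FieldStrength', 'Handedness', 'Age'])
selector = SelectKBest(f_regression, k=20).fit(X_num, train.Age)
print(X_num.columns[selector.get_support()])
###Output
_____no_output_____
###Markdown
BrainAge model As an illustration, here's a very simple approach to create a model and generate some predictions. _You work to improve this!_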
###Code
train.head()
# Collect all the numerical Freesurfer features in the train and test set:
X = train.drop(columns=['SubjectID', 'Source', 'ID', 'Sex', 'Height', 'Weight', 'FieldStrength', 'Handedness', 'Age'])
y = train.Age
###Output
_____no_output_____
###Markdown
Split off some data to be used to check our model's performance before submission:
###Code
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# We'll use a random forest model:
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=10, random_state=42, n_jobs=-1)
rf.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Check the mean absolute error on the validation set:
###Code
from sklearn.metrics import mean_absolute_error
y_pred = rf.predict(X_val)
mean_absolute_error(y_val, y_pred)
###Output
_____no_output_____
###Markdown
It's useful to plot our predictions agains the actual ages in the validation set:
###Code
plt.scatter(y_val, y_pred)
plt.show()
###Output
_____no_output_____
###Markdown
Perfect predictions would generate a diagonal line from the bottom left to the top right. We see that our model is not totally useless, but also that it's far from perfect. It's also useful to take a look at the residuals `y_val - y_preds`. Ideally, these would be close to zero.
###Code
y_residuals = y_val - y_pred
y_residuals[:10]
f, ax = plt.subplots(1,1,figsize=(12,8))
ax.scatter(y_val, y_residuals)
ax.axhline(y=0, xmin=0, xmax=100, linestyle='--', color='#b2b2b2')
plt.show()
###Output
_____no_output_____
###Markdown
The relatively large spread around the above diagonal is captured in the large standard deviation of the residuals:
###Code
y_residuals.std()
###Output
_____no_output_____
###Markdown
Submission After training your model and found predictions for the test data you have to create a CSV file containing `SubjectID` and your predictions in its two columns. Let's produce some predictions using the above model:
###Code
test.head()
# We have to drop the columns not used when constructing the model:
X_test = test.drop(columns=['SubjectID', 'Sex'])
predicted_ages = rf.predict(X_test)
# We get 625 predictions:
predicted_ages.shape
# Here are the first 10 predictions
predicted_ages[:10]
###Output
_____no_output_____
###Markdown
Here's one way to make a submission file:
###Code
submission = pd.DataFrame({'ID': test.SubjectID, 'label': predicted_ages})
submission.head()
submission.to_csv('submission.csv', index=False)
###Output
_____no_output_____ |
exam/.ipynb_checkpoints/exam-checkpoint.ipynb | ###Markdown
exam of Statistics & Machine LearningThe goal of this exam is to perform an analysis on data related to heart disease; in particular, we want to explore the relationship between a `target` variable - whether the patient has heart disease or not - and several other variables such as cholesterol level, age, ...The data is present in the file `'heartData_simplified.csv'`, which is a cleaned and simplified version of the [UCI heart disease data set](https://archive.ics.uci.edu/ml/datasets/heart+Disease)We ask that you explore the data-set and answer the questions in a commented jupyter notebook. You should send your code to **xxx@xxx** by the **xxx**.Comment your code to explain to us your thought process and detail your conclusions following the analysis. description of the columns* age : Patient age in years* sex : Patient sex* chol : Cholesterol level in mg/dl. * thalach : Maximum heart rate during the stress test* oldpeak : Decrease of the ST segment during exercise relative to its level at rest.* ca : Number of main blood vessels coloured by the radioactive dye. The number varies from 0 to 3.* thal : Results of the blood flow observed via the radioactive dye. * defect -> fixed defect (no blood flow in some part of the heart) * normal -> normal blood flow * reversible -> reversible defect (a blood flow is observed but it is not normal)* target : Whether the patient has a heart disease or not**code to read the data**
###Code
import pandas as pd
df = pd.read_table('heartData_simplified.csv',sep=',')
df.head()
print(len(df))
###Output
296
###Markdown
You can see that we have to handle categorical variables: `sex`, `thal`, and our response variable: `target`. If taken as-is, they may create problems in models, so we will first transform them into sets of 0/1 columns:
###Code
# in this first line, I make sure that "normal" becomes the default value for the thal columns
df['thal'] = df['thal'].astype(pd.CategoricalDtype(categories=["normal", "reversible", "defect"],ordered=True))
# get dummies will transform these categorical columns to sets of 0/1 columns
df = pd.get_dummies( df , drop_first=True )
df.head()
###Output
_____no_output_____
###Markdown
So now, we can see that `sex` has been replaced by `sex_male`, a column where 1 means male and 0 means female (so female is the default value, and in subsequent models the parameter associated with this column will represent the effect associated with being male).Similarly, `target` has been replaced with `target_yes`: 0 means no heart disease, 1 means presence of heart disease.Finally, `thal` has actually been replaced by 2 columns: this is how categorical variables with more than 2 levels are represented. A 1 in one of these two columns means that the blood flow is reversible or fixed (depending on which column the 1 is in).When `thal_reversible` and `thal_defect` are both 0, it means that blood flow is normal, the "default" value (note that there is no case where they are both 1). **Instructions**Your mission is to implement a machine learning pipeline in order to predict `target_yes`.Try to choose a relevant approach regarding: * the split of your data into train and test sets * the metric you would like to optimize.Try a logistic regression and a decision tree approach. * Which one yields the best result here? * If you had to remove 3 measurements from this analysis, which ones would you choose? * Try to describe briefly (in 3 or 4 sentences) which insights your model could yield about the relationship between heart disease and the covariables.
###Code
coVariables = df.drop('target_yes' , axis=1)
response = df.target_yes
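
# A minimal starting sketch (one reasonable choice, not the expected full answer):
# a stratified split preserves the diseased / healthy proportions in both sets,
# which keeps the evaluation metric comparable between train and test.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    coVariables, response, test_size=0.25, stratify=response, random_state=42)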
###Output
_____no_output_____ |
examples/mp/jupyter/sports_scheduling.ipynb | ###Markdown
Use decision optimization to help a sports league schedule its gamesThis tutorial includes everything you need to set up decision optimization engines, build mathematical programming models, and arrive at a good working schedule for a sports league's games.When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.>This notebook is part of [Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)>>It requires either an [installation of CPLEX Optimizers](http://ibmdecisionoptimization.github.io/docplex-doc/getting_started.html) or it can be run on [IBM Watson Studio Cloud](https://www.ibm.com/cloud/watson-studio/) (Sign up for a [free IBM Cloud account](https://dataplatform.cloud.ibm.com/registration/stepone?context=wdp&apps=all) and you can start using Watson Studio Cloud right away).Table of contents:- [The business problem](The-business-problem:--Games-Scheduling-in-the-National-Football-League)* [How decision optimization (prescriptive analytics) can help](How--decision-optimization-can-help)* [Use decision optimization](Use-decision-optimization) * [Step 1: Import the library](Step-1:-Import-the-library) - [Step 2: Model the Data](Step-2:-Model-the-data) * [Step 3: Prepare the data](Step-3:-Prepare-the-data) - [Step 4: Set up the prescriptive model](Step-4:-Set-up-the-prescriptive-model) * [Define the decision variables](Define-the-decision-variables) * [Express the business constraints](Express-the-business-constraints) * [Express the objective](Express-the-objective) * [Solve with Decision Optimization](Solve-with-Decision-Optimization) * [Step 5: Investigate the solution and run an example analysis](Step-5:-Investigate-the-solution-and-then-run-an-example-analysis)* [Summary](Summary) The business problem: Games Scheduling in the National Football League * A sports league with two divisions must schedule games so that each team plays every team within its division a given number of times, and each team plays teams in the other division a given number of times.* A team plays exactly one game each week. * A pair of teams cannot play each other on consecutive weeks.* While a third of a team's intradivisional games must be played in the first half of the season, the preference is for intradivisional games to be held as late as possible in the season. * To model this preference, there is an incentive for intradivisional games that increases each week as a square of the week. * An opponent must be assigned to each team each week to maximize the total of the incentives. This is a type of discrete optimization problem that can be solved by using either **Integer Programming** (IP) or **Constraint Programming** (CP). > **Integer Programming** is the class of problems defined as the optimization of a linear function, subject to linear constraints over integer variables. > **Constraint Programming** problems generally have discrete decision variables, but the constraints can be logical, and the arithmetic expressions are not restricted to being linear. For the purposes of this tutorial, we will illustrate a solution with mathematical programming (MIP). How decision optimization can help* Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes. 
* Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. With prescriptive analytics, you can: * Automate the complex decisions and trade-offs to better manage your limited resources.* Take advantage of a future opportunity or mitigate a future risk.* Proactively update recommendations based on changing events.* Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. Use decision optimization Step 1: Import the libraryRun the following code to import the Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming (docplex.mp) and Constraint Programming (docplex.cp).
###Code
import sys
try:
import docplex.mp
except:
raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
###Output
_____no_output_____
###Markdown
If *CPLEX* is not installed, install CPLEX Community edition.
###Code
try:
import cplex
except:
raise Exception('Please install CPLEX. See https://pypi.org/project/cplex/')
###Output
_____no_output_____
###Markdown
Step 2: Model the dataIn this scenario, the data is simple. Sixteen teams are listed per division below, but only the first `nb_teams_in_division` (five) from each list are scheduled; each team must play each team in its division once and each team outside its division once.Use a Python module, *Collections*, which implements some data structures that will help solve some problems. *Named tuples* help to define the meaning of each position in a tuple. This helps the code be more readable and self-documenting. You can use named tuples anywhere you use tuples. In this example, you create a *namedtuple* to hold the information for each match. You are also defining some of the parameters.
###Code
# Teams in 1st division
team_div1 = ["Baltimore Ravens","Cincinnati Bengals", "Cleveland Browns","Pittsburgh Steelers","Houston Texans",
"Indianapolis Colts","Jacksonville Jaguars","Tennessee Titans","Buffalo Bills","Miami Dolphins",
"New England Patriots","New York Jets","Denver Broncos","Kansas City Chiefs","Oakland Raiders",
"San Diego Chargers"]
# Teams in 2nd division
team_div2 = ["Chicago Bears","Detroit Lions","Green Bay Packers","Minnesota Vikings","Atlanta Falcons",
"Carolina Panthers","New Orleans Saints","Tampa Bay Buccaneers","Dallas Cowboys","New York Giants",
"Philadelphia Eagles","Washington Redskins","Arizona Cardinals","San Francisco 49ers",
"Seattle Seahawks","St. Louis Rams"]
#number_of_matches_to_play = 1 # Number of match to play between two teams on the league
# Schedule parameters
nb_teams_in_division = 5
max_teams_in_division = 10
number_of_matches_inside_division = 1
number_of_matches_outside_division = 1
###Output
_____no_output_____
###Markdown
Use basic HTML and a stylesheet to format the data.
###Code
CSS = """
body {
margin: 0;
font-family: Helvetica;
}
table.dataframe {
border-collapse: collapse;
border: none;
}
table.dataframe tr {
border: none;
}
table.dataframe td, table.dataframe th {
margin: 0;
border: 1px solid white;
padding-left: 0.25em;
padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
background-color: #fec;
text-align: left;
font-weight: normal;
}
table.dataframe tr:nth-child(2) th:empty {
border-left: none;
border-right: 1px dashed #888;
}
table.dataframe td {
border: 2px solid #ccf;
background-color: #f4f4ff;
}
table.dataframe thead th:first-child {
display: none;
}
table.dataframe tbody th {
display: none;
}
"""
from IPython.core.display import HTML
HTML('<style>{}</style>'.format(CSS))
###Output
_____no_output_____
###Markdown
Now you will import the *pandas* library. Pandas is an open source Python library for data analysis. It uses two data structures, *Series* and *DataFrame*, which are built on top of *NumPy*.A **Series** is a one-dimensional object similar to an array, list, or column in a table. It will assign a labeled index to each item in the series. By default, each item receives an index label from 0 to N, where N is the length of the series minus one.A **DataFrame** is a tabular data structure comprised of rows and columns, similar to a spreadsheet, database table, or R's data.frame object. Think of a DataFrame as a group of Series objects that share an index (the column names).In the example, each division (the AFC and the NFC) is part of a DataFrame.
###Code
import pandas as pd
team1 = pd.DataFrame(team_div1)
team2 = pd.DataFrame(team_div2)
team1.columns = ["AFC"]
team2.columns = ["NFC"]
teams = pd.concat([team1,team2], axis=1)
###Output
_____no_output_____
###Markdown
The following *display* function is a tool to show different representations of objects. When you issue the *display(teams)* command, you are sending the output to the notebook so that the result is stored in the document.
###Code
from IPython.display import display
display(teams)
###Output
_____no_output_____
###Markdown
Step 3: Prepare the dataGiven the number of teams in each division and the number of intradivisional and interdivisional games to be played, you can calculate the total number of teams and the number of weeks in the schedule, assuming every team plays exactly one game per week. The season is split into halves, and the number of the intradivisional games that each team must play in the first half of the season is calculated.
###Code
import numpy as np
nb_teams = 2 * nb_teams_in_division
teams = range(nb_teams)
# Calculate the number of weeks necessary
nb_inside_div = (nb_teams_in_division - 1) * number_of_matches_inside_division
nb_outside_div = nb_teams_in_division * number_of_matches_outside_division
nb_weeks = nb_inside_div + nb_outside_div
# Weeks to schedule
weeks = range(nb_weeks)
# Season is split into two halves
first_half_weeks = range(int(np.floor(nb_weeks / 2)))
nb_first_half_games = int(np.floor(nb_weeks / 3))
from collections import namedtuple
match = namedtuple("match",["team1","team2","is_divisional"])
# teams are 0-indexed: division 1 is teams 0 .. nb_teams_in_division-1, division 2 the rest,
# so a pairing (t1 < t2) is intradivisional iff t2 < nb_teams_in_division or t1 >= nb_teams_in_division
# (the original 't2 <= ... or t1 > ...' test assumed 1-indexed teams and mislabeled the boundary pairs)
matches = {match(t1, t2, 1 if (t2 < nb_teams_in_division or t1 >= nb_teams_in_division) else 0)
           for t1 in teams for t2 in teams if t1 < t2}
###Output
_____no_output_____
###Markdown
Number of games to play between pairs depends on whether the pairing is intradivisional or not.
###Code
nb_play = { m : number_of_matches_inside_division if m.is_divisional==1
else number_of_matches_outside_division
for m in matches}
###Output
_____no_output_____
###Markdown
Step 4: Set up the prescriptive model
###Code
from docplex.mp.environment import Environment
env = Environment()
env.print_information()
###Output
_____no_output_____
###Markdown
Create the DOcplex modelThe model contains all the business constraints and defines the objective.
###Code
from docplex.mp.model import Model
mdl = Model("sports")
###Output
_____no_output_____
###Markdown
Define the decision variables
###Code
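# one binary variable per (match, week) pair: plays[m, w] == 1 iff match m is scheduled in week w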
plays = mdl.binary_var_matrix(matches, weeks, lambda ij: "x_%s_%d" %(str(ij[0]), ij[1]))
###Output
_____no_output_____
###Markdown
Express the business constraints Each pair of teams must play the correct number of games.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in weeks) == nb_play[m]
for m in matches)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Each team must play exactly once in a week.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for m in matches if (m.team1 == t or m.team2 == t) ) == 1
for w in weeks for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Games between the same teams cannot be on successive weeks.
###Code
mdl.add_constraints( plays[m,w] + plays[m,w+1] <= 1
for w in weeks
for m in matches
if w < nb_weeks-1)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Some intradivisional games should be in the first half.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in first_half_weeks for m in matches
if (((m.team1 == t or m.team2 == t) and m.is_divisional == 1 )))
>= nb_first_half_games
for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Express the objectiveThe objective function for this example is designed to force intradivisional games to occur as late in the season as possible. The incentive for intradivisional games increases by week. There is no incentive for interdivisional games.
###Code
gain = { w : w*w for w in weeks}
# If an intradivisional pair plays in week w, Gain[w] is added to the objective.
mdl.maximize( mdl.sum (m.is_divisional * gain[w] * plays[m,w] for m in matches for w in weeks) )
###Output
_____no_output_____
###Markdown
Solve with Decision Optimization You will get the best solution found after n seconds, due to a time limit parameter.
###Code
mdl.print_information()
assert mdl.solve(), "!!! Solve of the model fails"
mdl.report()
###Output
_____no_output_____
###Markdown
Step 5: Investigate the solution and then run an example analysis Determine which of the scheduled games will be a replay of one of the last 10 Super Bowls.We start by creating a pandas DataFrame that contains the year and teams who played the last 10 Super Bowls.
###Code
try: # Python 2
team_league = dict({t : team_div1[t] for t in range(nb_teams_in_division) }.items() + \
{t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()
)
except: # Python 3
team_league = dict(list({t : team_div1[t] for t in range(nb_teams_in_division) }.items()) + \
list({t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()))
sol = namedtuple("solution",["week","is_divisional", "team1", "team2"])
solution = [sol(w, m.is_divisional, team_league[m.team1], team_league[m.team2]) for m in matches for w in weeks if plays[m,w].solution_value == 1]
nfl_finals = [("2016", "Carolina Panthers", "Denver Broncos"),
("2015", "New England Patriots", "Seattle Seahawks"),
("2014", "Seattle Seahawks", "Denver Broncos"),
("2013", "Baltimore Ravens", "San Francisco 49ers"),
("2012", "New York Giants", "New England Patriots "),
("2011", "Green Bay Packers", "Pittsburgh Steelers"),
("2010", "New Orleans Saints", "Indianapolis Colts"),
("2009", "Pittsburgh Steelers", "Arizona Cardinals"),
("2008", "New York Giants", "New England Patriots"),
("2007", "Indianapolis Colts", "Chicago Bears")
]
nfl_meetings = {(t[1], t[2]) for t in nfl_finals}
winners_bd = pd.DataFrame(nfl_finals)
winners_bd.columns = ["year", "team1", "team2"]
display(winners_bd)
###Output
_____no_output_____
###Markdown
We now look for the games in our solution that are replays of one of the past 10 Super Bowls.
###Code
months = ["January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"]
report = []
for m in solution:
if (m.team1, m.team2) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team1, m.team2))
if (m.team2, m.team1) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team2, m.team1))
print(report)
matches_bd = pd.DataFrame(report)
matches_bd.columns = ["week", "Month", "Team1", "Team2"]
try: #pandas >= 0.17
display(matches_bd.sort_values(by='week'))
except:
display(matches_bd.sort('week'))
###Output
_____no_output_____
###Code
import pandas as pd
team1 = pd.DataFrame(team_div1)
team2 = pd.DataFrame(team_div2)
team1.columns = ["AFC"]
team2.columns = ["NFC"]
teams = pd.concat([team1,team2], axis=1)
###Output
_____no_output_____
###Markdown
The following *display* function is a tool to show different representations of objects. When you issue the *display(teams)* command, you are sending the output to the notebook so that the result is stored in the document.
###Code
from IPython.display import display
display(teams)
###Output
_____no_output_____
###Markdown
Step 3: Prepare the dataGiven the number of teams in each division and the number of intradivisional and interdivisional games to be played, you can calculate the total number of teams and the number of weeks in the schedule, assuming every team plays exactly one game per week. The season is split into halves, and the number of the intradivisional games that each team must play in the first half of the season is calculated.
###Code
import numpy as np
nb_teams = 2 * nb_teams_in_division
teams = range(nb_teams)
# Calculate the number of weeks necessary
nb_inside_div = (nb_teams_in_division - 1) * number_of_matches_inside_division
nb_outside_div = nb_teams_in_division * number_of_matches_outside_division
nb_weeks = nb_inside_div + nb_outside_div
# Weeks to schedule
weeks = range(nb_weeks)
# Season is split into two halves
first_half_weeks = range(int(np.floor(nb_weeks / 2)))
nb_first_half_games = int(np.floor(nb_weeks / 3))
from collections import namedtuple
match = namedtuple("match",["team1","team2","is_divisional"])
matches = {match(t1,t2, 1 if ( t2 <= nb_teams_in_division or t1 > nb_teams_in_division) else 0)
for t1 in teams for t2 in teams if t1 < t2}
###Output
_____no_output_____
###Markdown
Number of games to play between pairs depends on whether the pairing is intradivisional or not.
###Code
nb_play = { m : number_of_matches_inside_division if m.is_divisional==1
else number_of_matches_outside_division
for m in matches}
###Output
_____no_output_____
###Markdown
Step 4: Set up the prescriptive model
###Code
from docplex.mp.environment import Environment
env = Environment()
env.print_information()
###Output
_____no_output_____
###Markdown
Create the DOcplex modelThe model contains all the business constraints and defines the objective.
###Code
from docplex.mp.model import Model
mdl = Model("sports")
###Output
_____no_output_____
###Markdown
Define the decision variables
###Code
plays = mdl.binary_var_matrix(matches, weeks, lambda ij: "x_%s_%d" %(str(ij[0]), ij[1]))
###Output
_____no_output_____
###Markdown
Express the business constraints Each pair of teams must play the correct number of games.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in weeks) == nb_play[m]
for m in matches)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Each team must play exactly once in a week.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for m in matches if (m.team1 == t or m.team2 == t) ) == 1
for w in weeks for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Games between the same teams cannot be on successive weeks.
###Code
mdl.add_constraints( plays[m,w] + plays[m,w+1] <= 1
for w in weeks
for m in matches
if w < nb_weeks-1)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Some intradivisional games should be in the first half.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in first_half_weeks for m in matches
if (((m.team1 == t or m.team2 == t) and m.is_divisional == 1 )))
>= nb_first_half_games
for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Express the objectiveThe objective function for this example is designed to force intradivisional games to occur as late in the season as possible. The incentive for intradivisional games increases by week. There is no incentive for interdivisional games.
###Code
gain = { w : w*w for w in weeks}
# If an intradivisional pair plays in week w, Gain[w] is added to the objective.
mdl.maximize( mdl.sum (m.is_divisional * gain[w] * plays[m,w] for m in matches for w in weeks) )
###Output
_____no_output_____
###Markdown
Solve with Decision Optimization You will get the best solution found after n seconds, due to a time limit parameter.
###Code
mdl.print_information()
assert mdl.solve(), "!!! Solve of the model fails"
mdl.report()
###Output
_____no_output_____
###Markdown
Step 5: Investigate the solution and then run an example analysis Determine which of the scheduled games will be a replay of one of the last 10 Super Bowls.We start by creating a pandas DataFrame that contains the year and teams who played the last 10 Super Bowls.
###Code
try: # Python 2
team_league = dict({t : team_div1[t] for t in range(nb_teams_in_division) }.items() + \
{t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()
)
except: # Python 3
team_league = dict(list({t : team_div1[t] for t in range(nb_teams_in_division) }.items()) + \
list({t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()))
sol = namedtuple("solution",["week","is_divisional", "team1", "team2"])
solution = [sol(w, m.is_divisional, team_league[m.team1], team_league[m.team2]) for m in matches for w in weeks if plays[m,w].solution_value == 1]
nfl_finals = [("2016", "Carolina Panthers", "Denver Broncos"),
("2015", "New England Patriots", "Seattle Seahawks"),
("2014", "Seattle Seahawks", "Denver Broncos"),
("2013", "Baltimore Ravens", "San Francisco 49ers"),
("2012", "New York Giants", "New England Patriots "),
("2011", "Green Bay Packers", "Pittsburgh Steelers"),
("2010", "New Orleans Saints", "Indianapolis Colts"),
("2009", "Pittsburgh Steelers", "Arizona Cardinals"),
("2008", "New York Giants", "New England Patriots"),
("2007", "Indianapolis Colts", "Chicago Bears")
]
nfl_meetings = {(t[1], t[2]) for t in nfl_finals}
winners_bd = pd.DataFrame(nfl_finals)
winners_bd.columns = ["year", "team1", "team2"]
display(winners_bd)
###Output
_____no_output_____
###Markdown
We now look for the games in our solution that are replays of one of the past 10 Super Bowls.
###Code
months = ["January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"]
report = []
for m in solution:
if (m.team1, m.team2) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team1, m.team2))
if (m.team2, m.team1) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team2, m.team1))
print(report)
matches_bd = pd.DataFrame(report)
matches_bd.columns = ["week", "Month", "Team1", "Team2"]
try: #pandas >= 0.17
display(matches_bd.sort_values(by='week'))
except:
display(matches_bd.sort('week'))
###Output
_____no_output_____
###Markdown
Use decision optimization to help a sports league schedule its gamesThis tutorial includes everything you need to set up decision optimization engines, build mathematical programming models, and arrive at a good working schedule for a sports league's games.When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.>This notebook is part of the [Prescriptive Analytics for Python](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html)>Running the sample requires the installation of [CPLEX Optimization studio](https://www.ibm.com/products/ilog-cplex-optimization-studio) (Commercial or free [CPLEX Community edition](https://www.ibm.com/account/reg/us-en/signup?formid=urx-20028>`)). This sample automatically installs *CPLEX CE* if needed.Table of contents:- [The business problem](The-business-problem:--Games-Scheduling-in-the-National-Football-League)* [How decision optimization (prescriptive analytics) can help](How--decision-optimization-can-help)* [Use decision optimization](Use-decision-optimization) * [Step 1: Import the library](Step-1:-Import-the-library) - [Step 2: Model the Data](Step-2:-Model-the-data) * [Step 3: Prepare the data](Step-3:-Prepare-the-data) - [Step 4: Set up the prescriptive model](Step-4:-Set-up-the-prescriptive-model) * [Define the decision variables](Define-the-decision-variables) * [Express the business constraints](Express-the-business-constraints) * [Express the objective](Express-the-objective) * [Solve with Decision Optimization](Solve-with-Decision-Optimization) * [Step 5: Investigate the solution and run an example analysis](Step-5:-Investigate-the-solution-and-then-run-an-example-analysis)* [Summary](Summary) The business problem: Games Scheduling in the National Football League * A sports league with two divisions must schedule games so that each team plays every team within its division a given number of times, and each team plays teams in the other division a given number of times.* A team plays exactly one game each week. * A pair of teams cannot play each other on consecutive weeks.* While a third of a team's intradivisional games must be played in the first half of the season, the preference is for intradivisional games to be held as late as possible in the season. * To model this preference, there is an incentive for intradivisional games that increases each week as a square of the week. * An opponent must be assigned to each team each week to maximize the total of the incentives.. This is a type of discrete optimization problem that can be solved by using either **Integer Programming** (IP) or **Constraint Programming** (CP). > **Integer Programming** is the class of problems defined as the optimization of a linear function, subject to linear constraints over integer variables. > **Constraint Programming** problems generally have discrete decision variables, but the constraints can be logical, and the arithmetic expressions are not restricted to being linear. For the purposes of this tutorial, we will illustrate a solution with mathematical programming (MIP). How decision optimization can help* Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes. * Prescriptive analytics is the next step on the path to insight-based actions. 
It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. With prescriptive analytics, you can: * Automate the complex decisions and trade-offs to better manage your limited resources.* Take advantage of a future opportunity or mitigate a future risk.* Proactively update recommendations based on changing events.* Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. Use decision optimization Step 1: Import the libraryRun the following code to import the Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming (docplex.mp) and Constraint Programming (docplex.cp).
###Code
import sys
try:
import docplex.mp
except:
raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
###Output
_____no_output_____
###Markdown
If *CPLEX* is not installed, install CPLEX Community edition.
###Code
try:
import cplex
except:
raise Exception('Please install CPLEX. See https://pypi.org/project/cplex/')
###Output
_____no_output_____
###Markdown
Step 2: Model the dataIn this scenario, the data is simple. There are eight teams in each division, and the teams must play each team in the division once and each team outside the division once.Use a Python module, *Collections*, which implements some data structures that will help solve some problems. *Named tuples* helps to define meaning of each position in a tuple. This helps the code be more readable and self-documenting. You can use named tuples in any place where you use tuples. In this example, you create a *namedtuple* to contain information for points. You are also defining some of the parameters.
###Code
# Teams in 1st division
team_div1 = ["Baltimore Ravens","Cincinnati Bengals", "Cleveland Browns","Pittsburgh Steelers","Houston Texans",
"Indianapolis Colts","Jacksonville Jaguars","Tennessee Titans","Buffalo Bills","Miami Dolphins",
"New England Patriots","New York Jets","Denver Broncos","Kansas City Chiefs","Oakland Raiders",
"San Diego Chargers"]
# Teams in 2nd division
team_div2 = ["Chicago Bears","Detroit Lions","Green Bay Packers","Minnesota Vikings","Atlanta Falcons",
"Carolina Panthers","New Orleans Saints","Tampa Bay Buccaneers","Dallas Cowboys","New York Giants",
"Philadelphia Eagles","Washington Redskins","Arizona Cardinals","San Francisco 49ers",
"Seattle Seahawks","St. Louis Rams"]
#number_of_matches_to_play = 1 # Number of match to play between two teams on the league
# Schedule parameters
nb_teams_in_division = 5
max_teams_in_division = 10
number_of_matches_inside_division = 1
number_of_matches_outside_division = 1
###Output
_____no_output_____
###Markdown
Use basic HTML and a stylesheet to format the data.
###Code
CSS = """
body {
margin: 0;
font-family: Helvetica;
}
table.dataframe {
border-collapse: collapse;
border: none;
}
table.dataframe tr {
border: none;
}
table.dataframe td, table.dataframe th {
margin: 0;
border: 1px solid white;
padding-left: 0.25em;
padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
background-color: #fec;
text-align: left;
font-weight: normal;
}
table.dataframe tr:nth-child(2) th:empty {
border-left: none;
border-right: 1px dashed #888;
}
table.dataframe td {
border: 2px solid #ccf;
background-color: #f4f4ff;
}
table.dataframe thead th:first-child {
display: none;
}
table.dataframe tbody th {
display: none;
}
"""
from IPython.core.display import HTML
HTML('<style>{}</style>'.format(CSS))
###Output
_____no_output_____
###Markdown
Now you will import the *pandas* library. Pandas is an open source Python library for data analysis. It uses two data structures, *Series* and *DataFrame*, which are built on top of *NumPy*.A **Series** is a one-dimensional object similar to an array, list, or column in a table. It will assign a labeled index to each item in the series. By default, each item receives an index label from 0 to N, where N is the length of the series minus one.A **DataFrame** is a tabular data structure comprised of rows and columns, similar to a spreadsheet, database table, or R's data.frame object. Think of a DataFrame as a group of Series objects that share an index (the column names).In the example, each division (the AFC and the NFC) is part of a DataFrame.
###Code
import pandas as pd
team1 = pd.DataFrame(team_div1)
team2 = pd.DataFrame(team_div2)
team1.columns = ["AFC"]
team2.columns = ["NFC"]
teams = pd.concat([team1,team2], axis=1)
###Output
_____no_output_____
###Markdown
The following *display* function is a tool to show different representations of objects. When you issue the *display(teams)* command, you are sending the output to the notebook so that the result is stored in the document.
###Code
from IPython.display import display
display(teams)
###Output
_____no_output_____
###Markdown
Step 3: Prepare the dataGiven the number of teams in each division and the number of intradivisional and interdivisional games to be played, you can calculate the total number of teams and the number of weeks in the schedule, assuming every team plays exactly one game per week. The season is split into halves, and the number of the intradivisional games that each team must play in the first half of the season is calculated.
###Code
import numpy as np
nb_teams = 2 * nb_teams_in_division
teams = range(nb_teams)
# Calculate the number of weeks necessary
nb_inside_div = (nb_teams_in_division - 1) * number_of_matches_inside_division
nb_outside_div = nb_teams_in_division * number_of_matches_outside_division
nb_weeks = nb_inside_div + nb_outside_div
# Weeks to schedule
weeks = range(nb_weeks)
# Season is split into two halves
first_half_weeks = range(int(np.floor(nb_weeks / 2)))
nb_first_half_games = int(np.floor(nb_weeks / 3))
from collections import namedtuple
match = namedtuple("match",["team1","team2","is_divisional"])
matches = {match(t1,t2, 1 if ( t2 <= nb_teams_in_division or t1 > nb_teams_in_division) else 0)
for t1 in teams for t2 in teams if t1 < t2}
###Output
_____no_output_____
###Markdown
Number of games to play between pairs depends on whether the pairing is intradivisional or not.
###Code
nb_play = { m : number_of_matches_inside_division if m.is_divisional==1
else number_of_matches_outside_division
for m in matches}
###Output
_____no_output_____
###Markdown
Step 4: Set up the prescriptive model
###Code
from docplex.mp.environment import Environment
env = Environment()
env.print_information()
###Output
_____no_output_____
###Markdown
Create the DOcplex modelThe model contains all the business constraints and defines the objective.
###Code
from docplex.mp.model import Model
mdl = Model("sports")
###Output
_____no_output_____
###Markdown
Define the decision variables
###Code
plays = mdl.binary_var_matrix(matches, weeks, lambda ij: "x_%s_%d" %(str(ij[0]), ij[1]))
###Output
_____no_output_____
###Markdown
Express the business constraints Each pair of teams must play the correct number of games.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in weeks) == nb_play[m]
for m in matches)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Each team must play exactly once in a week.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for m in matches if (m.team1 == t or m.team2 == t) ) == 1
for w in weeks for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Games between the same teams cannot be on successive weeks.
###Code
mdl.add_constraints( plays[m,w] + plays[m,w+1] <= 1
for w in weeks
for m in matches
if w < nb_weeks-1)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Some intradivisional games should be in the first half.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in first_half_weeks for m in matches
if (((m.team1 == t or m.team2 == t) and m.is_divisional == 1 )))
>= nb_first_half_games
for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Express the objectiveThe objective function for this example is designed to force intradivisional games to occur as late in the season as possible. The incentive for intradivisional games increases by week. There is no incentive for interdivisional games.
###Code
gain = { w : w*w for w in weeks}
# If an intradivisional pair plays in week w, Gain[w] is added to the objective.
mdl.maximize( mdl.sum (m.is_divisional * gain[w] * plays[m,w] for m in matches for w in weeks) )
###Output
_____no_output_____
###Markdown
Solve with Decision Optimization You will get the best solution found after n seconds, due to a time limit parameter.
###Code
mdl.print_information()
assert mdl.solve(), "!!! Solve of the model fails"
mdl.report()
###Output
_____no_output_____
###Markdown
Step 5: Investigate the solution and then run an example analysis Determine which of the scheduled games will be a replay of one of the last 10 Super Bowls.We start by creating a pandas DataFrame that contains the year and teams who played the last 10 Super Bowls.
###Code
try: # Python 2
team_league = dict({t : team_div1[t] for t in range(nb_teams_in_division) }.items() + \
{t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()
)
except: # Python 3
team_league = dict(list({t : team_div1[t] for t in range(nb_teams_in_division) }.items()) + \
list({t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()))
sol = namedtuple("solution",["week","is_divisional", "team1", "team2"])
solution = [sol(w, m.is_divisional, team_league[m.team1], team_league[m.team2]) for m in matches for w in weeks if plays[m,w].solution_value == 1]
nfl_finals = [("2016", "Carolina Panthers", "Denver Broncos"),
("2015", "New England Patriots", "Seattle Seahawks"),
("2014", "Seattle Seahawks", "Denver Broncos"),
("2013", "Baltimore Ravens", "San Francisco 49ers"),
("2012", "New York Giants", "New England Patriots "),
("2011", "Green Bay Packers", "Pittsburgh Steelers"),
("2010", "New Orleans Saints", "Indianapolis Colts"),
("2009", "Pittsburgh Steelers", "Arizona Cardinals"),
("2008", "New York Giants", "New England Patriots"),
("2007", "Indianapolis Colts", "Chicago Bears")
]
nfl_meetings = {(t[1], t[2]) for t in nfl_finals}
winners_bd = pd.DataFrame(nfl_finals)
winners_bd.columns = ["year", "team1", "team2"]
display(winners_bd)
###Output
_____no_output_____
###Markdown
We now look for the games in our solution that are replays of one of the past 10 Super Bowls.
###Code
months = ["January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"]
report = []
for m in solution:
if (m.team1, m.team2) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team1, m.team2))
if (m.team2, m.team1) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team2, m.team1))
print(report)
matches_bd = pd.DataFrame(report)
matches_bd.columns = ["week", "Month", "Team1", "Team2"]
try: #pandas >= 0.17
display(matches_bd.sort_values(by='week'))
except:
display(matches_bd.sort('week'))
###Output
_____no_output_____
###Markdown
Use decision optimization to help a sports league schedule its gamesThis tutorial includes everything you need to set up decision optimization engines, build mathematical programming models, and arrive at a good working schedule for a sports league's games.When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.>This notebook is part of the [Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)>Running the sample requires the installation of [CPLEX Optimization studio](https://www.ibm.com/products/ilog-cplex-optimization-studio) (Commercial or free [CPLEX Community edition](https://www.ibm.com/account/reg/us-en/signup?formid=urx-20028>`)). This sample automatically installs *CPLEX CE* if needed.Table of contents:- [The business problem](The-business-problem:--Games-Scheduling-in-the-National-Football-League)* [How decision optimization (prescriptive analytics) can help](How--decision-optimization-can-help)* [Use decision optimization](Use-decision-optimization) * [Step 1: Import the library](Step-1:-Import-the-library) - [Step 2: Model the Data](Step-2:-Model-the-data) * [Step 3: Prepare the data](Step-3:-Prepare-the-data) - [Step 4: Set up the prescriptive model](Step-4:-Set-up-the-prescriptive-model) * [Define the decision variables](Define-the-decision-variables) * [Express the business constraints](Express-the-business-constraints) * [Express the objective](Express-the-objective) * [Solve with Decision Optimization](Solve-with-Decision-Optimization) * [Step 5: Investigate the solution and run an example analysis](Step-5:-Investigate-the-solution-and-then-run-an-example-analysis)* [Summary](Summary) The business problem: Games Scheduling in the National Football League * A sports league with two divisions must schedule games so that each team plays every team within its division a given number of times, and each team plays teams in the other division a given number of times.* A team plays exactly one game each week. * A pair of teams cannot play each other on consecutive weeks.* While a third of a team's intradivisional games must be played in the first half of the season, the preference is for intradivisional games to be held as late as possible in the season. * To model this preference, there is an incentive for intradivisional games that increases each week as a square of the week. * An opponent must be assigned to each team each week to maximize the total of the incentives.. This is a type of discrete optimization problem that can be solved by using either **Integer Programming** (IP) or **Constraint Programming** (CP). > **Integer Programming** is the class of problems defined as the optimization of a linear function, subject to linear constraints over integer variables. > **Constraint Programming** problems generally have discrete decision variables, but the constraints can be logical, and the arithmetic expressions are not restricted to being linear. For the purposes of this tutorial, we will illustrate a solution with mathematical programming (MIP). How decision optimization can help* Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes. * Prescriptive analytics is the next step on the path to insight-based actions. 
It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. With prescriptive analytics, you can: * Automate the complex decisions and trade-offs to better manage your limited resources.* Take advantage of a future opportunity or mitigate a future risk.* Proactively update recommendations based on changing events.* Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. Use decision optimization Step 1: Import the libraryRun the following code to import the Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming (docplex.mp) and Constraint Programming (docplex.cp).
###Code
import sys
try:
import docplex.mp
except:
raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
###Output
_____no_output_____
###Markdown
If *CPLEX* is not installed, install CPLEX Community edition.
###Code
try:
import cplex
except:
raise Exception('Please install CPLEX. See https://pypi.org/project/cplex/')
###Output
_____no_output_____
###Markdown
Step 2: Model the dataIn this scenario, the data is simple. There are eight teams in each division, and the teams must play each team in the division once and each team outside the division once.Use a Python module, *Collections*, which implements some data structures that will help solve some problems. *Named tuples* helps to define meaning of each position in a tuple. This helps the code be more readable and self-documenting. You can use named tuples in any place where you use tuples. In this example, you create a *namedtuple* to contain information for points. You are also defining some of the parameters.
###Code
# Teams in 1st division
team_div1 = ["Baltimore Ravens","Cincinnati Bengals", "Cleveland Browns","Pittsburgh Steelers","Houston Texans",
"Indianapolis Colts","Jacksonville Jaguars","Tennessee Titans","Buffalo Bills","Miami Dolphins",
"New England Patriots","New York Jets","Denver Broncos","Kansas City Chiefs","Oakland Raiders",
"San Diego Chargers"]
# Teams in 2nd division
team_div2 = ["Chicago Bears","Detroit Lions","Green Bay Packers","Minnesota Vikings","Atlanta Falcons",
"Carolina Panthers","New Orleans Saints","Tampa Bay Buccaneers","Dallas Cowboys","New York Giants",
"Philadelphia Eagles","Washington Redskins","Arizona Cardinals","San Francisco 49ers",
"Seattle Seahawks","St. Louis Rams"]
#number_of_matches_to_play = 1 # Number of match to play between two teams on the league
# Schedule parameters
nb_teams_in_division = 5
max_teams_in_division = 10
number_of_matches_inside_division = 1
number_of_matches_outside_division = 1
###Output
_____no_output_____
###Markdown
Use basic HTML and a stylesheet to format the data.
###Code
CSS = """
body {
margin: 0;
font-family: Helvetica;
}
table.dataframe {
border-collapse: collapse;
border: none;
}
table.dataframe tr {
border: none;
}
table.dataframe td, table.dataframe th {
margin: 0;
border: 1px solid white;
padding-left: 0.25em;
padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
background-color: #fec;
text-align: left;
font-weight: normal;
}
table.dataframe tr:nth-child(2) th:empty {
border-left: none;
border-right: 1px dashed #888;
}
table.dataframe td {
border: 2px solid #ccf;
background-color: #f4f4ff;
}
table.dataframe thead th:first-child {
display: none;
}
table.dataframe tbody th {
display: none;
}
"""
from IPython.core.display import HTML
HTML('<style>{}</style>'.format(CSS))
###Output
_____no_output_____
###Markdown
Now you will import the *pandas* library. Pandas is an open source Python library for data analysis. It uses two data structures, *Series* and *DataFrame*, which are built on top of *NumPy*.A **Series** is a one-dimensional object similar to an array, list, or column in a table. It will assign a labeled index to each item in the series. By default, each item receives an index label from 0 to N, where N is the length of the series minus one.A **DataFrame** is a tabular data structure comprised of rows and columns, similar to a spreadsheet, database table, or R's data.frame object. Think of a DataFrame as a group of Series objects that share an index (the column names).In the example, each division (the AFC and the NFC) is part of a DataFrame.
###Code
import pandas as pd
team1 = pd.DataFrame(team_div1)
team2 = pd.DataFrame(team_div2)
team1.columns = ["AFC"]
team2.columns = ["NFC"]
teams = pd.concat([team1,team2], axis=1)
###Output
_____no_output_____
###Markdown
The following *display* function is a tool to show different representations of objects. When you issue the *display(teams)* command, you are sending the output to the notebook so that the result is stored in the document.
###Code
from IPython.display import display
display(teams)
###Output
_____no_output_____
###Markdown
Step 3: Prepare the dataGiven the number of teams in each division and the number of intradivisional and interdivisional games to be played, you can calculate the total number of teams and the number of weeks in the schedule, assuming every team plays exactly one game per week. The season is split into halves, and the number of the intradivisional games that each team must play in the first half of the season is calculated.
###Code
import numpy as np
nb_teams = 2 * nb_teams_in_division
teams = range(nb_teams)
# Calculate the number of weeks necessary
nb_inside_div = (nb_teams_in_division - 1) * number_of_matches_inside_division
nb_outside_div = nb_teams_in_division * number_of_matches_outside_division
nb_weeks = nb_inside_div + nb_outside_div
# Weeks to schedule
weeks = range(nb_weeks)
# Season is split into two halves
first_half_weeks = range(int(np.floor(nb_weeks / 2)))
nb_first_half_games = int(np.floor(nb_weeks / 3))
from collections import namedtuple
match = namedtuple("match",["team1","team2","is_divisional"])
matches = {match(t1,t2, 1 if ( t2 <= nb_teams_in_division or t1 > nb_teams_in_division) else 0)
for t1 in teams for t2 in teams if t1 < t2}
###Output
_____no_output_____
###Markdown
Number of games to play between pairs depends on whether the pairing is intradivisional or not.
###Code
nb_play = { m : number_of_matches_inside_division if m.is_divisional==1
else number_of_matches_outside_division
for m in matches}
###Output
_____no_output_____
###Markdown
Step 4: Set up the prescriptive model
###Code
from docplex.mp.environment import Environment
env = Environment()
env.print_information()
###Output
_____no_output_____
###Markdown
Create the DOcplex modelThe model contains all the business constraints and defines the objective.
###Code
from docplex.mp.model import Model
mdl = Model("sports")
###Output
_____no_output_____
###Markdown
Define the decision variables
###Code
plays = mdl.binary_var_matrix(matches, weeks, lambda ij: "x_%s_%d" %(str(ij[0]), ij[1]))
###Output
_____no_output_____
###Markdown
Express the business constraints Each pair of teams must play the correct number of games.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in weeks) == nb_play[m]
for m in matches)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Each team must play exactly once in a week.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for m in matches if (m.team1 == t or m.team2 == t) ) == 1
for w in weeks for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Games between the same teams cannot be on successive weeks.
###Code
mdl.add_constraints( plays[m,w] + plays[m,w+1] <= 1
for w in weeks
for m in matches
if w < nb_weeks-1)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Some intradivisional games should be in the first half.
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in first_half_weeks for m in matches
if (((m.team1 == t or m.team2 == t) and m.is_divisional == 1 )))
>= nb_first_half_games
for t in teams)
mdl.print_information()
###Output
_____no_output_____
###Markdown
Express the objectiveThe objective function for this example is designed to force intradivisional games to occur as late in the season as possible. The incentive for intradivisional games increases by week. There is no incentive for interdivisional games.
###Code
gain = { w : w*w for w in weeks}
# If an intradivisional pair plays in week w, Gain[w] is added to the objective.
mdl.maximize( mdl.sum (m.is_divisional * gain[w] * plays[m,w] for m in matches for w in weeks) )
###Output
_____no_output_____
###Markdown
Solve with Decision Optimization You will get the best solution found after n seconds, due to a time limit parameter.
###Code
mdl.print_information()
assert mdl.solve(), "!!! Solve of the model fails"
mdl.report()
###Output
_____no_output_____
###Markdown
Step 5: Investigate the solution and then run an example analysis Determine which of the scheduled games will be a replay of one of the last 10 Super Bowls.We start by creating a pandas DataFrame that contains the year and teams who played the last 10 Super Bowls.
###Code
try: # Python 2
team_league = dict({t : team_div1[t] for t in range(nb_teams_in_division) }.items() + \
{t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()
)
except: # Python 3
team_league = dict(list({t : team_div1[t] for t in range(nb_teams_in_division) }.items()) + \
list({t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()))
sol = namedtuple("solution",["week","is_divisional", "team1", "team2"])
solution = [sol(w, m.is_divisional, team_league[m.team1], team_league[m.team2]) for m in matches for w in weeks if plays[m,w].solution_value == 1]
nfl_finals = [("2016", "Carolina Panthers", "Denver Broncos"),
("2015", "New England Patriots", "Seattle Seahawks"),
("2014", "Seattle Seahawks", "Denver Broncos"),
("2013", "Baltimore Ravens", "San Francisco 49ers"),
("2012", "New York Giants", "New England Patriots "),
("2011", "Green Bay Packers", "Pittsburgh Steelers"),
("2010", "New Orleans Saints", "Indianapolis Colts"),
("2009", "Pittsburgh Steelers", "Arizona Cardinals"),
("2008", "New York Giants", "New England Patriots"),
("2007", "Indianapolis Colts", "Chicago Bears")
]
nfl_meetings = {(t[1], t[2]) for t in nfl_finals}
winners_bd = pd.DataFrame(nfl_finals)
winners_bd.columns = ["year", "team1", "team2"]
display(winners_bd)
###Output
_____no_output_____
###Markdown
We now look for the games in our solution that are replays of one of the past 10 Super Bowls.
###Code
months = ["January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"]
report = []
for m in solution:
if (m.team1, m.team2) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team1, m.team2))
if (m.team2, m.team1) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team2, m.team1))
print(report)
matches_bd = pd.DataFrame(report)
matches_bd.columns = ["week", "Month", "Team1", "Team2"]
try: #pandas >= 0.17
display(matches_bd.sort_values(by='week'))
except:
display(matches_bd.sort('week'))
###Output
_____no_output_____
###Markdown
Use decision optimization to help a sports league schedule its games

This tutorial includes everything you need to set up decision optimization engines, build mathematical programming models, and arrive at a good working schedule for a sports league's games. When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.

>This notebook is part of [Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)
>
>It requires either an [installation of CPLEX Optimizers](http://ibmdecisionoptimization.github.io/docplex-doc/getting_started.html) or it can be run on [IBM Cloud Pak for Data as a Service](https://www.ibm.com/products/cloud-pak-for-data/as-a-service/) (sign up for a [free IBM Cloud account](https://dataplatform.cloud.ibm.com/registration/stepone?context=wdp&apps=all) and you can start using `IBM Cloud Pak for Data as a Service` right away).
>
> CPLEX is available on IBM Cloud Pak for Data and IBM Cloud Pak for Data as a Service:
> - IBM Cloud Pak for Data as a Service: depends on the runtime used:
>   - Python 3.x runtime: Community edition
>   - Python 3.x + DO runtime: full edition
> - IBM Cloud Pak for Data: Community edition is installed by default. Please install the `DO` add-on in `Watson Studio Premium` for the full edition.

Table of contents:

- [The business problem](#The-business-problem:--Games-Scheduling-in-the-National-Football-League)
- [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)
- [Use decision optimization](#Use-decision-optimization)
  - [Step 1: Import the library](#Step-1:-Import-the-library)
  - [Step 2: Model the Data](#Step-2:-Model-the-data)
  - [Step 3: Prepare the data](#Step-3:-Prepare-the-data)
  - [Step 4: Set up the prescriptive model](#Step-4:-Set-up-the-prescriptive-model)
    - [Define the decision variables](#Define-the-decision-variables)
    - [Express the business constraints](#Express-the-business-constraints)
    - [Express the objective](#Express-the-objective)
    - [Solve with Decision Optimization](#Solve-with-Decision-Optimization)
  - [Step 5: Investigate the solution and run an example analysis](#Step-5:-Investigate-the-solution-and-then-run-an-example-analysis)
- [Summary](#Summary)

The business problem: Games Scheduling in the National Football League

* A sports league with two divisions must schedule games so that each team plays every team within its division a given number of times, and each team plays teams in the other division a given number of times.
* A team plays exactly one game each week.
* A pair of teams cannot play each other on consecutive weeks.
* While a third of a team's intradivisional games must be played in the first half of the season, the preference is for intradivisional games to be held as late as possible in the season.
  * To model this preference, there is an incentive for intradivisional games that increases each week as the square of the week.
* An opponent must be assigned to each team each week to maximize the total of the incentives.

This is a type of discrete optimization problem that can be solved by using either **Integer Programming** (IP) or **Constraint Programming** (CP).

> **Integer Programming** is the class of problems defined as the optimization of a linear function, subject to linear constraints over integer variables.
>
> **Constraint Programming** problems generally have discrete decision variables, but the constraints can be logical, and the arithmetic expressions are not restricted to being linear.
For the purposes of this tutorial, we will illustrate a solution with mathematical programming (MIP).

How decision optimization can help

* Prescriptive analytics (decision optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes.
* Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
* Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.

With prescriptive analytics, you can:

* Automate the complex decisions and trade-offs to better manage your limited resources.
* Take advantage of a future opportunity or mitigate a future risk.
* Proactively update recommendations based on changing events.
* Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.

Use decision optimization

Step 1: Import the library

Run the following code to import the Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming (docplex.mp) and Constraint Programming (docplex.cp).
###Code
import sys
try:
import docplex.mp
except:
raise Exception('Please install docplex. See https://pypi.org/project/docplex/')
###Output
_____no_output_____
###Markdown
If *CPLEX* is not installed, install CPLEX Community edition.
###Code
try:
import cplex
except:
raise Exception('Please install CPLEX. See https://pypi.org/project/cplex/')
###Output
_____no_output_____
###Markdown
Step 2: Model the data

In this scenario, the data is simple. Each division's list below contains 16 team names, but the schedule parameters that follow select only the first `nb_teams_in_division = 5` teams from each list, and the teams must play each team in their division once and each team outside their division once.

Use the Python module *collections*, which implements some data structures that will help solve some problems. *Named tuples* help to define the meaning of each position in a tuple. This makes the code more readable and self-documenting. You can use named tuples in any place where you use tuples. In this example, you create a *namedtuple* to contain information for points. You are also defining some of the parameters.
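As a quick, hypothetical illustration of the *namedtuple* mechanics (the `Point` type below is illustrative only and is not part of the model):
###Code
from collections import namedtuple

# Each position in the tuple gets a name, so access is self-documenting.
Point = namedtuple("Point", ["x", "y"])
p = Point(3, 4)
print(p.x, p.y) # -> 3 4
###Output
_____no_output_____
###Markdown
Now define the team lists and the schedule parameters.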
###Code
# Teams in 1st division
team_div1 = ["Baltimore Ravens","Cincinnati Bengals", "Cleveland Browns","Pittsburgh Steelers","Houston Texans",
"Indianapolis Colts","Jacksonville Jaguars","Tennessee Titans","Buffalo Bills","Miami Dolphins",
"New England Patriots","New York Jets","Denver Broncos","Kansas City Chiefs","Oakland Raiders",
"San Diego Chargers"]
# Teams in 2nd division
team_div2 = ["Chicago Bears","Detroit Lions","Green Bay Packers","Minnesota Vikings","Atlanta Falcons",
"Carolina Panthers","New Orleans Saints","Tampa Bay Buccaneers","Dallas Cowboys","New York Giants",
"Philadelphia Eagles","Washington Redskins","Arizona Cardinals","San Francisco 49ers",
"Seattle Seahawks","St. Louis Rams"]
#number_of_matches_to_play = 1 # Number of match to play between two teams on the league
# Schedule parameters
nb_teams_in_division = 5
max_teams_in_division = 10
number_of_matches_inside_division = 1
number_of_matches_outside_division = 1
###Output
_____no_output_____
###Markdown
Use basic HTML and a stylesheet to format the data.
###Code
CSS = """
body {
margin: 0;
font-family: Helvetica;
}
table.dataframe {
border-collapse: collapse;
border: none;
}
table.dataframe tr {
border: none;
}
table.dataframe td, table.dataframe th {
margin: 0;
border: 1px solid white;
padding-left: 0.25em;
padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
background-color: #fec;
text-align: left;
font-weight: normal;
}
table.dataframe tr:nth-child(2) th:empty {
border-left: none;
border-right: 1px dashed #888;
}
table.dataframe td {
border: 2px solid #ccf;
background-color: #f4f4ff;
}
table.dataframe thead th:first-child {
display: none;
}
table.dataframe tbody th {
display: none;
}
"""
from IPython.core.display import HTML
HTML('<style>{}</style>'.format(CSS))
###Output
_____no_output_____
###Markdown
Now you will import the *pandas* library. Pandas is an open source Python library for data analysis. It uses two data structures, *Series* and *DataFrame*, which are built on top of *NumPy*.

A **Series** is a one-dimensional object similar to an array, list, or column in a table. It will assign a labeled index to each item in the series. By default, each item receives an index label from 0 to N, where N is the length of the series minus one.

A **DataFrame** is a tabular data structure comprised of rows and columns, similar to a spreadsheet, database table, or R's data.frame object. Think of a DataFrame as a group of Series objects that share an index (the column names).

In the example, each division (the AFC and the NFC) is part of a DataFrame.
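As a minimal sketch of that default integer index on a *Series* (the values are arbitrary):
###Code
import pandas as pd

# By default each item receives an index label from 0 to N-1.
s = pd.Series(["a", "b", "c"])
print(s.index.tolist()) # -> [0, 1, 2]
###Output
_____no_output_____
###Markdown
Build one DataFrame column per division.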
###Code
import pandas as pd
team1 = pd.DataFrame(team_div1)
team2 = pd.DataFrame(team_div2)
team1.columns = ["AFC"]
team2.columns = ["NFC"]
teams = pd.concat([team1,team2], axis=1)
###Output
_____no_output_____
###Markdown
The following *display* function is a tool to show different representations of objects. When you issue the *display(teams)* command, you are sending the output to the notebook so that the result is stored in the document.
###Code
from IPython.display import display
display(teams)
###Output
_____no_output_____
###Markdown
Step 3: Prepare the data

Given the number of teams in each division and the number of intradivisional and interdivisional games to be played, you can calculate the total number of teams and the number of weeks in the schedule, assuming every team plays exactly one game per week. The season is split into halves, and the number of intradivisional games that each team must play in the first half of the season is calculated.
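With the parameters above, each team plays $(5 - 1) \times 1 = 4$ intradivisional and $5 \times 1 = 5$ interdivisional games, so `nb_weeks` is $9$, `first_half_weeks` covers $\lfloor 9/2 \rfloor = 4$ weeks, and `nb_first_half_games` is $\lfloor 9/3 \rfloor = 3$.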
###Code
import numpy as np
nb_teams = 2 * nb_teams_in_division
teams = range(nb_teams)
# Calculate the number of weeks necessary
nb_inside_div = (nb_teams_in_division - 1) * number_of_matches_inside_division
nb_outside_div = nb_teams_in_division * number_of_matches_outside_division
nb_weeks = nb_inside_div + nb_outside_div
# Weeks to schedule
weeks = range(nb_weeks)
# Season is split into two halves
first_half_weeks = range(int(np.floor(nb_weeks / 2)))
nb_first_half_games = int(np.floor(nb_weeks / 3))
from collections import namedtuple
match = namedtuple("match",["team1","team2","is_divisional"])
matches = {match(t1,t2, 1 if ( t2 <= nb_teams_in_division or t1 > nb_teams_in_division) else 0)
for t1 in teams for t2 in teams if t1 < t2}
###Output
_____no_output_____
###Markdown
The number of games to play between a pair of teams depends on whether the pairing is intradivisional or not.
###Code
nb_play = { m : number_of_matches_inside_division if m.is_divisional==1
else number_of_matches_outside_division
for m in matches}
###Output
_____no_output_____
###Markdown
Step 4: Set up the prescriptive model

Create the DOcplex model

The model contains all the business constraints and defines the objective.
###Code
from docplex.mp.model import Model
mdl = Model("sports")
###Output
_____no_output_____
###Markdown
Define the decision variables
###Code
plays = mdl.binary_var_matrix(matches, weeks, lambda ij: "x_%s_%d" %(str(ij[0]), ij[1]))
###Output
_____no_output_____
###Markdown
Express the business constraints

Each pair of teams must play the correct number of games.
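In symbols, with the binary variable $x_{m,w} = 1$ meaning match $m$ is played in week $w$ (this is the `plays` matrix defined above): $\sum_{w} x_{m,w} = \mathrm{nb\_play}_m$ for every match $m$.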
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in weeks) == nb_play[m]
for m in matches)
mdl.print_information()
###Output
Model: sports
- number of variables: 405
- binary=405, integer=0, continuous=0
- number of constraints: 45
- linear=45
- parameters: defaults
- objective: none
- problem type is: MILP
###Markdown
Each team must play exactly once in a week.
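In symbols: $\sum_{m \,:\, t \in m} x_{m,w} = 1$ for every week $w$ and team $t$.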
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for m in matches if (m.team1 == t or m.team2 == t) ) == 1
for w in weeks for t in teams)
mdl.print_information()
###Output
Model: sports
- number of variables: 405
- binary=405, integer=0, continuous=0
- number of constraints: 135
- linear=135
- parameters: defaults
- objective: none
- problem type is: MILP
###Markdown
Games between the same teams cannot be on successive weeks.
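In symbols: $x_{m,w} + x_{m,w+1} \leq 1$ for every match $m$ and every week $w < \mathrm{nb\_weeks} - 1$.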
###Code
mdl.add_constraints( plays[m,w] + plays[m,w+1] <= 1
for w in weeks
for m in matches
if w < nb_weeks-1)
mdl.print_information()
###Output
Model: sports
- number of variables: 405
- binary=405, integer=0, continuous=0
- number of constraints: 495
- linear=495
- parameters: defaults
- objective: none
- problem type is: MILP
###Markdown
Some intradivisional games should be in the first half.
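In symbols: $\sum_{w \in \mathrm{firstHalf}} \; \sum_{m \,:\, t \in m,\ m\ \mathrm{divisional}} x_{m,w} \geq \mathrm{nb\_first\_half\_games}$ for every team $t$.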
###Code
mdl.add_constraints( mdl.sum(plays[m,w] for w in first_half_weeks for m in matches
if (((m.team1 == t or m.team2 == t) and m.is_divisional == 1 )))
>= nb_first_half_games
for t in teams)
mdl.print_information()
###Output
Model: sports
- number of variables: 405
- binary=405, integer=0, continuous=0
- number of constraints: 505
- linear=505
- parameters: defaults
- objective: none
- problem type is: MILP
###Markdown
Express the objective

The objective function for this example is designed to force intradivisional games to occur as late in the season as possible. The incentive for intradivisional games increases by week. There is no incentive for interdivisional games.
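In symbols, the model maximizes $\sum_{m}\sum_{w} \mathrm{is\_divisional}_m \cdot w^2 \cdot x_{m,w}$.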
###Code
gain = { w : w*w for w in weeks}
# If an intradivisional pair plays in week w, Gain[w] is added to the objective.
mdl.maximize( mdl.sum (m.is_divisional * gain[w] * plays[m,w] for m in matches for w in weeks) )
###Output
_____no_output_____
###Markdown
Solve with Decision Optimization

If a time limit parameter is set, you will get the best solution found within that limit; this notebook solves with default parameters.
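If you do want a hard cap on the solve time, a minimal sketch (assuming the standard `docplex.mp` `Model.set_time_limit` API; the 20-second value is arbitrary):
###Code
# Optional: cap the CPLEX search at 20 seconds before calling solve().
# set_time_limit is assumed from the docplex.mp Model API.
mdl.set_time_limit(20)
###Output
_____no_output_____
###Markdown
Now solve and report.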
###Code
mdl.print_information()
assert mdl.solve(), "!!! Solve of the model fails"
mdl.report()
###Output
Model: sports
- number of variables: 405
- binary=405, integer=0, continuous=0
- number of constraints: 505
- linear=505
- parameters: defaults
- objective: maximize
- problem type is: MILP
* model sports solved with objective = 260.000
###Markdown
Step 5: Investigate the solution and then run an example analysis

Determine which of the scheduled games will be a replay of one of the last 10 Super Bowls. We start by creating a pandas DataFrame that contains the year and teams who played the last 10 Super Bowls.
###Code
try: # Python 2
team_league = dict({t : team_div1[t] for t in range(nb_teams_in_division) }.items() + \
{t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()
)
except: # Python 3
team_league = dict(list({t : team_div1[t] for t in range(nb_teams_in_division) }.items()) + \
list({t+nb_teams_in_division : team_div2[t] for t in range(nb_teams_in_division) }.items()))
sol = namedtuple("solution",["week","is_divisional", "team1", "team2"])
solution = [sol(w, m.is_divisional, team_league[m.team1], team_league[m.team2]) for m in matches for w in weeks if plays[m,w].solution_value == 1]
nfl_finals = [("2016", "Carolina Panthers", "Denver Broncos"),
("2015", "New England Patriots", "Seattle Seahawks"),
("2014", "Seattle Seahawks", "Denver Broncos"),
("2013", "Baltimore Ravens", "San Francisco 49ers"),
("2012", "New York Giants", "New England Patriots "),
("2011", "Green Bay Packers", "Pittsburgh Steelers"),
("2010", "New Orleans Saints", "Indianapolis Colts"),
("2009", "Pittsburgh Steelers", "Arizona Cardinals"),
("2008", "New York Giants", "New England Patriots"),
("2007", "Indianapolis Colts", "Chicago Bears")
]
nfl_meetings = {(t[1], t[2]) for t in nfl_finals}
winners_bd = pd.DataFrame(nfl_finals)
winners_bd.columns = ["year", "team1", "team2"]
display(winners_bd)
###Output
_____no_output_____
###Markdown
We now look for the games in our solution that are replays of one of the past 10 Super Bowls.
###Code
months = ["January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"]
report = []
for m in solution:
if (m.team1, m.team2) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team1, m.team2))
if (m.team2, m.team1) in nfl_meetings:
report.append((m.week, months[m.week//4], m.team2, m.team1))
print(report)
matches_bd = pd.DataFrame(report)
matches_bd.columns = ["week", "Month", "Team1", "Team2"]
try: #pandas >= 0.17
display(matches_bd.sort_values(by='week'))
except:
display(matches_bd.sort('week'))
###Output
_____no_output_____ |
source/notebooks/Courses/Acceleration and Velocity experiment/Acceleration and Velocity experiment.ipynb | ###Markdown
Acceleration and Velocity experiment

1.0 Theory
---

1.1 Velocity

The velocity of an object is the rate of change of its position with respect to a frame of reference, and is a function of time. Velocity is equivalent to a specification of an object's speed and direction of motion (e.g. 60 km/h to the north). Velocity is a fundamental concept in kinematics, the branch of classical mechanics that describes the motion of bodies.

Velocity is a physical vector quantity; both magnitude and direction are needed to define it. The scalar absolute value (magnitude) of velocity is called speed, being a coherent derived unit whose quantity is measured in the SI (metric system) as metres per second (m/s) or as the SI base unit of m⋅s$^{-1}$. For example, "5 metres per second" is a scalar, whereas "5 metres per second east" is a vector. If there is a change in speed, direction or both, then the object has a changing velocity and is said to be undergoing an acceleration.

Average velocity

Velocity is defined as the rate of change of position with respect to time, which may also be referred to as the instantaneous velocity to emphasize the distinction from the average velocity. In some applications the "average velocity" of an object might be needed, that is to say, the constant velocity that would provide the same resultant displacement as a variable velocity in the same time interval, v(t), over some time period Δt. Average velocity can be calculated as:

$$\bar{v} = \frac{\Delta x}{\Delta t}$$

1.2 Acceleration

In physics, acceleration is the rate of change of velocity of an object with respect to time. An object's acceleration is the net result of all forces acting on the object, as described by Newton's Second Law. The SI unit for acceleration is metre per second squared (m⋅s$^{-2}$). For example, when a car starts from a standstill (zero velocity, in an inertial frame of reference) and travels in a straight line at increasing speeds, it is accelerating in the direction of travel. If the car turns, an acceleration occurs toward the new direction. The forward acceleration of the car is called a linear (or tangential) acceleration, the reaction to which passengers in the car experience as a force pushing them back into their seats. When changing direction, this is called radial (as orthogonal to tangential) acceleration, the reaction to which passengers experience as a sideways force. If the speed of the car decreases, this is an acceleration in the opposite direction of the velocity of the vehicle, sometimes called deceleration or retrograde burning in spacecraft. Passengers experience the reaction to deceleration as a force pushing them forwards. Both acceleration and deceleration are treated the same; they are both changes in velocity. Each of these accelerations (tangential, radial, deceleration) is felt by passengers until their velocity (speed and direction) matches that of the uniformly moving car.

Average acceleration

Acceleration is the rate of change of velocity. At any point on a trajectory, the magnitude of the acceleration is given by the rate of change of velocity in both magnitude and direction at that point. An object's average acceleration over a period of time is its change in velocity Δv divided by the duration of the period Δt. Mathematically:

$$\bar{a} = \frac{\Delta v}{\Delta t}$$

Used Wikipedia for the theory and the images:
[Velocity Theory](https://en.wikipedia.org/wiki/Velocity)
[Acceleration Theory](https://en.wikipedia.org/wiki/Acceleration)

2.0 Experiment
---

Now it's time to use the Proteas robot to help us with the experiment.
We want to make Proteas move forward a short distance of 10 cm and take measurements using the odometer. When Proteas returns the measurements, we want to draw two graphs: one with the average velocity and one with the average acceleration.
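A quick worked example of the two formulas with made-up numbers (plain Python, no robot needed):
###Code
# Hypothetical readings, independent of the Proteas hardware below:
# the robot covers 10 cm in 2 s, then gains 0.05 m/s of speed over the next 2 s.
dx = 0.10          # displacement in metres (10 cm)
dt = 2.0           # elapsed time in seconds
v_avg = dx / dt    # average velocity = Δx/Δt -> 0.05 m/s
dv = 0.05          # change in velocity in m/s
a_avg = dv / dt    # average acceleration = Δv/Δt -> 0.025 m/s²
print(v_avg, a_avg)
###Output
0.05 0.025
###Markdown
Now the actual experiment on the robot: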
###Code
from proteas_lib import control
control.start_lib()
velocity_logs = control.data_logger()
acceleration_logs = control.data_logger()
timer1 = control.timer()
motor_a = control.motor(17,27,22)
motor_b = control.motor(10,11,9)
odometer_a = control.odometer(6)
print("Experiment start!")
velocity_logs.clean_data()
acceleration_logs.clean_data()
timer1.start_timer()
odometer_a.reset()
motor_a.move()
motor_b.move()
previous_distance = 0
previous_v = 0
previous_t = 0
velocity_logs.store_value(0,0)
acceleration_logs.store_value(0,0)
while odometer_a.get_distance() < 10:
current_distance = odometer_a.get_distance()
current_t = timer1.get_elapsed()
    # Store the data only when the distance reading changes, because the
    # odometer sensor is too slow for continuous readings
if previous_distance != current_distance:
dt = current_t - previous_t
dx = (current_distance - previous_distance)*0.01 # Convert the cm to m
v = dx/dt
dv = v - previous_v
a = dv/dt
velocity_logs.store_value(current_t,v)
acceleration_logs.store_value(current_t,a)
previous_v = v
previous_distance = current_distance
previous_t = current_t
print(odometer_a.get_distance())
motor_a.stop()
motor_b.stop()
print("Experiment completed!")
###Output
Experiment start!
1.04
2.07
3.11
4.15
5.18
6.22
7.26
8.29
9.33
Experiment completed!
###Markdown
Average velocity
###Code
velocity_logs.draw_graph(type = "line")
###Output
_____no_output_____
###Markdown
Average acceleration
###Code
acceleration_logs.draw_graph(type = "line")
control.clean()
###Output
_____no_output_____ |
DGL_official_tutorial/DGLAtAGlance.ipynb | ###Markdown
[DGL at a Glance](https://docs.dgl.ai/en/0.6.x/tutorials/basics/1_first.html)+ “Zachary’s karate club” problem + Analyze the karate club that split into two factions.
###Code
import dgl
import numpy as np
def build_karate_club_graph():
# All 78 edges are stored in two numpy arrays. One for source endpoints
# while the other for destination endpoints.
src = np.array([1, 2, 2, 3, 3, 3, 4, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 10, 10,
10, 11, 12, 12, 13, 13, 13, 13, 16, 16, 17, 17, 19, 19, 21, 21,
25, 25, 27, 27, 27, 28, 29, 29, 30, 30, 31, 31, 31, 31, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33])
dst = np.array([0, 0, 1, 0, 1, 2, 0, 0, 0, 4, 5, 0, 1, 2, 3, 0, 2, 2, 0, 4,
5, 0, 0, 3, 0, 1, 2, 3, 5, 6, 0, 1, 0, 1, 0, 1, 23, 24, 2, 23,
24, 2, 23, 26, 1, 8, 0, 24, 25, 28, 2, 8, 14, 15, 18, 20, 22, 23,
29, 30, 31, 8, 9, 13, 14, 15, 18, 19, 20, 22, 23, 26, 27, 28, 29, 30,
31, 32])
# Edges are directional in DGL; Make them bi-directional.
u = np.concatenate([src, dst])
v = np.concatenate([dst, src])
# Construct a DGLGraph
return dgl.graph((u, v))
G = build_karate_club_graph()
print('We have %d nodes.' % G.number_of_nodes())
print('We have %d edges.' % G.number_of_edges())
import networkx as nx
# Since the actual graph is undirected, we convert it for visualization
# purpose.
nx_G = G.to_networkx().to_undirected()
# Kamada-Kawai layout usually looks good for arbitrary graphs
pos = nx.kamada_kawai_layout(nx_G)
nx.draw(nx_G, pos, with_labels=True, node_color=[[.7, .7, .7]])
# In DGL, you can add features for all nodes at once, using a feature tensor that
# batches node features along the first dimension. The code below adds the learnable
# embeddings for all nodes:
import torch
import torch.nn as nn
import torch.nn.functional as F
embed = nn.Embedding(34, 5) # 34 nodes with embedding dim equal to 5
G.ndata['feat'] = embed.weight
# print out node 2's input feature
print(G.ndata['feat'][2])
# print out node 10 and 11's input features
print(G.ndata['feat'][[10, 11]])
# GraphConv passes information from neighboring nodes to the next layer through a nonlinear transformation.
from dgl.nn.pytorch import GraphConv
class GCN(nn.Module):
def __init__(self, in_feats, hidden_size, num_classes):
super(GCN, self).__init__()
self.conv1 = GraphConv(in_feats, hidden_size)
self.conv2 = GraphConv(hidden_size, num_classes)
def forward(self, g, inputs):
h = self.conv1(g, inputs)
h = torch.relu(h)
h = self.conv2(g, h)
return h
# The first layer transforms input features of size of 5 to a hidden size of 5.
# The second layer transforms the hidden layer and produces output features of
# size 2, corresponding to the two groups of the karate club.
net = GCN(5, 5, 2)
# this is semi-supervised, so leave the inputs as they are
inputs = embed.weight
labeled_nodes = torch.tensor([0, 33]) # only the instructor and the president nodes are labeled
labels = torch.tensor([0, 1]) # their labels are different
import itertools
optimizer = torch.optim.Adam(itertools.chain(net.parameters(), embed.parameters()), lr=0.01)
all_logits = []
for epoch in range(50):
logits = net(G, inputs)
# we save the logits for visualization later
all_logits.append(logits.detach())
logp = F.log_softmax(logits, 1)
# we only compute loss for labeled nodes
loss = F.nll_loss(logp[labeled_nodes], labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Epoch %d | Loss: %.4f' % (epoch, loss.item()))
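
# After training, each member's predicted faction is simply the argmax of the
# final logits (a quick sketch using the all_logits list collected above).
final_pred = all_logits[-1].argmax(dim=1)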
###Output
Epoch 0 | Loss: 0.7100
Epoch 1 | Loss: 0.6903
Epoch 2 | Loss: 0.6729
Epoch 3 | Loss: 0.6589
Epoch 4 | Loss: 0.6467
Epoch 5 | Loss: 0.6348
Epoch 6 | Loss: 0.6219
Epoch 7 | Loss: 0.6083
Epoch 8 | Loss: 0.5946
Epoch 9 | Loss: 0.5801
Epoch 10 | Loss: 0.5651
Epoch 11 | Loss: 0.5490
Epoch 12 | Loss: 0.5322
Epoch 13 | Loss: 0.5149
Epoch 14 | Loss: 0.4971
Epoch 15 | Loss: 0.4786
Epoch 16 | Loss: 0.4596
Epoch 17 | Loss: 0.4401
Epoch 18 | Loss: 0.4204
Epoch 19 | Loss: 0.4007
Epoch 20 | Loss: 0.3812
Epoch 21 | Loss: 0.3615
Epoch 22 | Loss: 0.3415
Epoch 23 | Loss: 0.3213
Epoch 24 | Loss: 0.3012
Epoch 25 | Loss: 0.2809
Epoch 26 | Loss: 0.2605
Epoch 27 | Loss: 0.2402
Epoch 28 | Loss: 0.2203
Epoch 29 | Loss: 0.2011
Epoch 30 | Loss: 0.1827
Epoch 31 | Loss: 0.1651
Epoch 32 | Loss: 0.1485
Epoch 33 | Loss: 0.1330
Epoch 34 | Loss: 0.1186
Epoch 35 | Loss: 0.1054
Epoch 36 | Loss: 0.0934
Epoch 37 | Loss: 0.0824
Epoch 38 | Loss: 0.0726
Epoch 39 | Loss: 0.0638
Epoch 40 | Loss: 0.0560
Epoch 41 | Loss: 0.0491
Epoch 42 | Loss: 0.0432
Epoch 43 | Loss: 0.0379
Epoch 44 | Loss: 0.0333
Epoch 45 | Loss: 0.0294
Epoch 46 | Loss: 0.0259
Epoch 47 | Loss: 0.0229
Epoch 48 | Loss: 0.0203
Epoch 49 | Loss: 0.0180
|
examples/notebooks/data_analyzer__stats_generator.ipynb | ###Markdown
ANOVOS - Statistic Generator The following notebook shows the list of functions related to the "stats generator" module provided under the ANOVOS package and how each can be invoked: Global Summary, Measures of Counts, Measures of Central Tendency, Measures of Cardinality, Measures of Dispersion, Measures of Percentiles, Measures of Shape. **Setting Spark Session**
###Code
from anovos.shared.spark import *
###Output
_____no_output_____
###Markdown
**Input/Output Path**
###Code
inputPath = "../data/income_dataset/csv"
outputPath = "../output/income_dataset/data_analyzer"
from anovos.data_ingest.data_ingest import read_dataset
df = read_dataset(spark, file_path = inputPath, file_type = "csv",file_configs = {"header": "True",
"delimiter": "," ,
"inferSchema": "True"})
df.toPandas().head(5)
###Output
_____no_output_____
###Markdown
Global Summary- API specification of function **global_summary** can be found here
###Code
from anovos.data_analyzer.stats_generator import global_summary
# Example 1 - with mandatory arguments (remaining arguments take default values)
odf = global_summary(spark, df)
odf.toPandas()
# Example 2 - 'all' columns (excluding drop_cols)
odf = global_summary(spark, idf = df, list_of_cols='all', drop_cols=['ifa'])
odf.toPandas()
# Example 3 - selected columns
odf = global_summary(spark, idf = df, list_of_cols= ['age','sex','race','workclass','fnlwgt'])
odf.toPandas()
###Output
No. of Rows: 32,561
No. of Columns: 5
Numerical Columns: 2
['fnlwgt', 'age']
Categorical Columns: 3
['workclass', 'race', 'sex']
###Markdown
Measures of Counts - API specification of function **measures_of_counts** can be found here - Non-zero count/% is calculated only for numerical columns
###Code
from anovos.data_analyzer.stats_generator import measures_of_counts, nonzeroCount_computation
# Example 1 - with mandatory arguments (remaining arguments take default values)
odf = measures_of_counts(spark, df)
odf.toPandas()
# Example 2 - 'all' columns (excluding drop_cols)
odf = measures_of_counts(spark, idf = df, list_of_cols='all', drop_cols=['ifa'])
odf.toPandas()
# Example 3 - selected columns
odf = measures_of_counts(spark, idf = df, list_of_cols= ['age','sex','race','workclass','fnlwgt'])
odf.toPandas()
# Example 4 - only numerical columns
odf = measures_of_counts(spark, idf = df, list_of_cols= ['age','education-num','capital-gain'])
odf.toPandas()
# Example 5 - only categorical columns (a user warning is shown as the non-zero computation didn't happen due to the absence of any numerical column)
odf = measures_of_counts(spark, idf = df, list_of_cols= ['sex','race','workclass'])
odf.toPandas()
###Output
/Users/chensinuo/anaconda3/envs/py3/lib/python3.7/site-packages/anovos/data_analyzer/stats_generator.py:132: UserWarning: No Non-Zero Count Computation
warnings.warn("No Non-Zero Count Computation")
###Markdown
Measures of Central Tendency - API specification of function **measures_of_centralTendency** can be found here- Mode & Mode% calculated only for discrete columns (string + integer datatypes)
###Code
from anovos.data_analyzer.stats_generator import measures_of_centralTendency
# Example 1 - with mandatory arguments (remaining arguments take default values)
odf = measures_of_centralTendency(spark, df)
odf.toPandas()
# Example 2 - 'all' columns (excluding drop_cols)
odf = measures_of_centralTendency(spark, idf = df, list_of_cols='all', drop_cols=['ifa'])
odf.toPandas()
# Example 3 - selected columns
odf = measures_of_centralTendency(spark, idf = df, list_of_cols= ['age','sex','race','workclass','fnlwgt'])
odf.toPandas()
# Example 4 - only numerical columns
odf = measures_of_centralTendency(spark, idf = df, list_of_cols= ['age','education-num','capital-gain','logfnl'])
odf.toPandas()
# Example 5 - only categorical columns
odf = measures_of_centralTendency(spark, idf = df, list_of_cols= ['sex','race','workclass'])
odf.toPandas()
###Output
_____no_output_____
###Markdown
Measures of Cardinality - API specification of function **measures_of_cardinality** can be found here- Calculated only for discrete columns (string + integer datatypes)
###Code
from anovos.data_analyzer.stats_generator import measures_of_cardinality
# Example 1 - with mandatory arguments (remaining arguments take default values)
odf = measures_of_cardinality(spark, df)
odf.toPandas()
# Example 2 - 'all' columns (excluding drop_cols)
odf = measures_of_cardinality(spark, idf = df, list_of_cols='all', drop_cols=['ifa'])
odf.toPandas()
# Example 3 - selected columns
odf = measures_of_cardinality(spark, idf = df, list_of_cols= ['age','sex','race','workclass','fnlwgt'])
odf.toPandas()
# Example 4 - only numerical columns
odf = measures_of_cardinality(spark, idf = df, list_of_cols= ['age','education-num','capital-gain','logfnl'])
odf.toPandas()
# Example 5 - only categorical columns
odf = measures_of_cardinality(spark, idf = df, list_of_cols= ['sex','race','workclass'])
odf.toPandas()
###Output
_____no_output_____
###Markdown
Measures of Dispersion - API specification of function **measures_of_dispersion** can be found here- Supports only numerical columns
###Code
from anovos.data_analyzer.stats_generator import measures_of_dispersion
# Example 1 - with mandatory arguments (remaining arguments take default values)
odf = measures_of_dispersion(spark, df)
odf.toPandas()
# Example 2 - 'all' columns (excluding drop_cols)
odf = measures_of_dispersion(spark, idf = df, list_of_cols='all', drop_cols=['capital-loss'])
odf.toPandas()
# Example 3 - selected numerical columns
odf = measures_of_dispersion(spark, idf = df, list_of_cols= ['age','education-num','capital-gain','logfnl'])
odf.toPandas()
###Output
_____no_output_____
###Markdown
Measures of Percentiles - API specification of function **measures_of_percentiles** can be found here- Supports only numerical columns
###Code
from anovos.data_analyzer.stats_generator import measures_of_percentiles
# Example 1 - with mandatory arguments (remaining arguments take default values)
odf = measures_of_percentiles(spark, df)
odf.toPandas()
# Example 2 - 'all' columns (excluding drop_cols)
odf = measures_of_percentiles(spark, idf = df, list_of_cols='all', drop_cols=['capital-gain'])
odf.toPandas()
# Example 3 - selected numerical columns
odf = measures_of_percentiles(spark, idf = df, list_of_cols= ['age','education-num','capital-gain','logfnl'])
odf.toPandas()
###Output
_____no_output_____
###Markdown
Measures of Shape - API specification of function **measures_of_shape** can be found here- Supports only numerical columns
###Code
from anovos.data_analyzer.stats_generator import measures_of_shape
# Example 1 - with mandatory arguments (remaining arguments take default values)
odf = measures_of_shape(spark, df)
odf.toPandas()
# Example 2 - 'all' columns (excluding drop_cols)
odf = measures_of_shape(spark, idf = df, list_of_cols='all', drop_cols=['capital-gain'])
odf.toPandas()
# Example 3 - selected numerical columns
odf = measures_of_shape(spark, idf = df, list_of_cols= ['age','education-num','capital-gain','logfnl'])
odf.toPandas()
###Output
_____no_output_____
###Markdown
ANOVOS - Statistic Generator The following notebook shows the list of functions related to the "stats generator" module provided under the ANOVOS package and how each can be invoked.* [Global Summary](Global-Summary)* [Measures of Counts](Measures-of-Counts)* [Measures of Central Tendency](Measures-of-Central-Tendency)* [Measures of Cardinality](Measures-of-Cardinality)* [Measures of Dispersion](Measures-of-Dispersion)* [Measures of Percentiles](Measures-of-Percentiles)* [Measures of Shape](Measures-of-Shape) **Setting Spark Session**
###Code
from anovos.shared.spark import *
sc.setLogLevel("ERROR")
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
**Input/Output Path**
###Code
inputPath = "../data/income_dataset/csv"
outputPath = "../output/income_dataset/data_analyzer"
from anovos.data_ingest.data_ingest import read_dataset
df = read_dataset(spark, file_path = inputPath, file_type = "csv",file_configs = {"header": "True",
"delimiter": "," ,
"inferSchema": "True"})
df = df.drop("dt_1", "dt_2")
df.toPandas().head(5)
###Output
_____no_output_____ |
Section3/4_working_with_other_models_from_tfcontriblearn.ipynb | ###Markdown
Working with other models from tf.contrib.learn
###Code
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import pandas as pd
from sklearn.model_selection import train_test_split
%matplotlib inline
###Output
_____no_output_____
###Markdown
1. Load and prepare the data
###Code
current_dir = os.getcwd()
dataset_path = os.path.join(os.getcwd(), os.pardir, 'data', 'diamond_prices_original.csv')
diamonds = pd.read_csv(dataset_path)
TARGET = 'price'
## Separating into training and testing: the objects we got back are pandas DataFrames.
X_train, X_val, y_train, y_val = train_test_split(diamonds.drop(TARGET, axis=1),
diamonds[TARGET],
train_size=0.9,
random_state=60)
diamonds.head(10)
###Output
_____no_output_____
###Markdown
2. Write one or more dataset importing functions
###Code
BATCH_SIZE = 128
N_EPOCHS = 400
## Training input function
input_fn_train = tf.estimator.inputs.pandas_input_fn(
x=X_train,
y=y_train,
batch_size=BATCH_SIZE,
num_epochs=N_EPOCHS,
shuffle = True,
target_column=TARGET)
## Validation input function
input_fn_val = tf.estimator.inputs.pandas_input_fn(
x=X_val,
y=y_val,
num_epochs=1,
shuffle = False,
target_column=TARGET)
###Output
_____no_output_____
###Markdown
3. Define the features for the "deep" part of the model (DNN)Here we commonly use the numerical features or other dense columns in our data.
###Code
numerical_cols = ['carat', 'depth', 'table', 'x', 'y', 'z']
categorical_cols = ['cut', 'color', 'clarity']
unique_values = {col: list(diamonds[col].unique()) for col in categorical_cols}
cut = tf.feature_column.categorical_column_with_vocabulary_list('cut',
vocabulary_list=list(diamonds['cut'].unique()))
deep_columns = [tf.feature_column.numeric_column(col) for col in numerical_cols]
deep_columns = deep_columns + [tf.feature_column.indicator_column(cut)]
###Output
_____no_output_____
###Markdown
4. Define the features for the "wide" part of the model (linear model)You can create interactions that result in sparse columns in this step.
###Code
clarity = tf.feature_column.categorical_column_with_vocabulary_list('clarity',
vocabulary_list=list(diamonds['clarity'].unique()))
color = tf.feature_column.categorical_column_with_vocabulary_list('color',
vocabulary_list=list(diamonds['color'].unique()))
# Creating the 3 interaction features
# tf.feature_column.crossed_column: Returns a column for performing crosses of categorical features.
cut_x_color = tf.feature_column.crossed_column(['cut', 'color'], hash_bucket_size=5*7)
cut_x_clarity = tf.feature_column.crossed_column(['cut', 'clarity'], hash_bucket_size=5*8)
color_x_clarity = tf.feature_column.crossed_column(['color', 'clarity'], hash_bucket_size=7*8)
wide_columns = [color, clarity, cut_x_color, cut_x_clarity, color_x_clarity]
###Output
_____no_output_____
###Markdown
5. Instantiate the DNNLinearCombinedRegressor Estimator
###Code
combined_model = tf.estimator.DNNLinearCombinedRegressor(
model_dir='./wide_and_deep_model',
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[64, 32, 16, 8])
###Output
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_model_dir': './wide_and_deep_model', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x0000023D9F48E400>, '_task_type': 'worker', '_task_id': 0, '_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
###Markdown
6. Train the model
###Code
combined_model.train(input_fn=input_fn_train)
###Output
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into ./wide_and_deep_model\model.ckpt.
INFO:tensorflow:loss = 4.14569e+09, step = 1
INFO:tensorflow:global_step/sec: 190.763
INFO:tensorflow:loss = 3.48952e+09, step = 101 (0.527 sec)
INFO:tensorflow:global_step/sec: 314.002
INFO:tensorflow:loss = 3.84351e+09, step = 201 (0.319 sec)
INFO:tensorflow:global_step/sec: 287.781
INFO:tensorflow:loss = 4.21204e+09, step = 301 (0.348 sec)
INFO:tensorflow:global_step/sec: 331.264
INFO:tensorflow:loss = 3.55313e+09, step = 401 (0.301 sec)
INFO:tensorflow:global_step/sec: 260.996
INFO:tensorflow:loss = 3.73544e+09, step = 501 (0.384 sec)
INFO:tensorflow:global_step/sec: 281.039
INFO:tensorflow:loss = 2.89769e+09, step = 601 (0.356 sec)
INFO:tensorflow:global_step/sec: 239.653
INFO:tensorflow:loss = 4.20192e+09, step = 701 (0.418 sec)
INFO:tensorflow:global_step/sec: 286.951
INFO:tensorflow:loss = 4.17243e+09, step = 801 (0.353 sec)
INFO:tensorflow:global_step/sec: 244.694
INFO:tensorflow:loss = 3.60422e+09, step = 901 (0.409 sec)
INFO:tensorflow:global_step/sec: 230.061
INFO:tensorflow:loss = 3.93851e+09, step = 1001 (0.431 sec)
INFO:tensorflow:global_step/sec: 280.131
INFO:tensorflow:loss = 3.79947e+09, step = 1101 (0.356 sec)
INFO:tensorflow:global_step/sec: 213.92
INFO:tensorflow:loss = 3.63821e+09, step = 1201 (0.472 sec)
INFO:tensorflow:global_step/sec: 236.064
INFO:tensorflow:loss = 3.63248e+09, step = 1301 (0.419 sec)
INFO:tensorflow:global_step/sec: 306.361
INFO:tensorflow:loss = 3.10098e+09, step = 1401 (0.327 sec)
INFO:tensorflow:global_step/sec: 250.359
INFO:tensorflow:loss = 4.00021e+09, step = 1501 (0.404 sec)
INFO:tensorflow:global_step/sec: 213.418
INFO:tensorflow:loss = 2.53882e+09, step = 1601 (0.467 sec)
INFO:tensorflow:global_step/sec: 214.252
INFO:tensorflow:loss = 3.54049e+09, step = 1701 (0.467 sec)
INFO:tensorflow:global_step/sec: 241.131
INFO:tensorflow:loss = 2.91181e+09, step = 1801 (0.412 sec)
INFO:tensorflow:global_step/sec: 329.597
INFO:tensorflow:loss = 3.2497e+09, step = 1901 (0.302 sec)
INFO:tensorflow:global_step/sec: 279.288
INFO:tensorflow:loss = 2.87008e+09, step = 2001 (0.359 sec)
INFO:tensorflow:global_step/sec: 298.095
INFO:tensorflow:loss = 2.48978e+09, step = 2101 (0.337 sec)
INFO:tensorflow:global_step/sec: 234.747
INFO:tensorflow:loss = 2.16754e+09, step = 2201 (0.427 sec)
INFO:tensorflow:global_step/sec: 215.87
INFO:tensorflow:loss = 3.19487e+09, step = 2301 (0.465 sec)
INFO:tensorflow:global_step/sec: 268.457
INFO:tensorflow:loss = 2.91337e+09, step = 2401 (0.375 sec)
INFO:tensorflow:global_step/sec: 217.285
INFO:tensorflow:loss = 2.82307e+09, step = 2501 (0.457 sec)
INFO:tensorflow:global_step/sec: 290.765
INFO:tensorflow:loss = 2.7558e+09, step = 2601 (0.340 sec)
INFO:tensorflow:global_step/sec: 313.016
INFO:tensorflow:loss = 3.36694e+09, step = 2701 (0.320 sec)
INFO:tensorflow:global_step/sec: 245.578
INFO:tensorflow:loss = 2.53536e+09, step = 2801 (0.412 sec)
INFO:tensorflow:global_step/sec: 248.211
INFO:tensorflow:loss = 2.29621e+09, step = 2901 (0.398 sec)
INFO:tensorflow:global_step/sec: 280.97
INFO:tensorflow:loss = 2.58263e+09, step = 3001 (0.356 sec)
INFO:tensorflow:global_step/sec: 259.307
INFO:tensorflow:loss = 2.83381e+09, step = 3101 (0.388 sec)
INFO:tensorflow:global_step/sec: 236.628
INFO:tensorflow:loss = 1.15102e+09, step = 3201 (0.420 sec)
INFO:tensorflow:global_step/sec: 264.503
INFO:tensorflow:loss = 2.35967e+09, step = 3301 (0.381 sec)
INFO:tensorflow:global_step/sec: 239.926
INFO:tensorflow:loss = 2.69542e+09, step = 3401 (0.418 sec)
INFO:tensorflow:global_step/sec: 227.961
INFO:tensorflow:loss = 2.7467e+09, step = 3501 (0.440 sec)
INFO:tensorflow:global_step/sec: 262.019
INFO:tensorflow:loss = 2.70216e+09, step = 3601 (0.380 sec)
INFO:tensorflow:global_step/sec: 325.345
INFO:tensorflow:loss = 1.50157e+09, step = 3701 (0.306 sec)
INFO:tensorflow:global_step/sec: 334.022
INFO:tensorflow:loss = 2.53916e+09, step = 3801 (0.298 sec)
INFO:tensorflow:global_step/sec: 323.467
INFO:tensorflow:loss = 2.07837e+09, step = 3901 (0.309 sec)
INFO:tensorflow:global_step/sec: 324.491
INFO:tensorflow:loss = 1.22639e+09, step = 4001 (0.310 sec)
INFO:tensorflow:global_step/sec: 318.373
INFO:tensorflow:loss = 2.11551e+09, step = 4101 (0.313 sec)
INFO:tensorflow:global_step/sec: 332.121
INFO:tensorflow:loss = 1.74341e+09, step = 4201 (0.301 sec)
INFO:tensorflow:global_step/sec: 335.285
INFO:tensorflow:loss = 3.71026e+09, step = 4301 (0.297 sec)
INFO:tensorflow:global_step/sec: 276.621
INFO:tensorflow:loss = 1.91491e+09, step = 4401 (0.362 sec)
INFO:tensorflow:global_step/sec: 330.212
INFO:tensorflow:loss = 1.2438e+09, step = 4501 (0.304 sec)
INFO:tensorflow:global_step/sec: 336.077
INFO:tensorflow:loss = 2.16256e+09, step = 4601 (0.298 sec)
INFO:tensorflow:global_step/sec: 325.876
INFO:tensorflow:loss = 2.04232e+09, step = 4701 (0.305 sec)
INFO:tensorflow:global_step/sec: 282.245
INFO:tensorflow:loss = 1.48231e+09, step = 4801 (0.355 sec)
INFO:tensorflow:global_step/sec: 339.967
INFO:tensorflow:loss = 1.58778e+09, step = 4901 (0.294 sec)
INFO:tensorflow:global_step/sec: 328.594
INFO:tensorflow:loss = 1.96082e+09, step = 5001 (0.304 sec)
INFO:tensorflow:global_step/sec: 326.777
INFO:tensorflow:loss = 2.13348e+09, step = 5101 (0.306 sec)
INFO:tensorflow:global_step/sec: 320.344
INFO:tensorflow:loss = 1.41504e+09, step = 5201 (0.312 sec)
INFO:tensorflow:global_step/sec: 259.036
INFO:tensorflow:loss = 1.69903e+09, step = 5301 (0.385 sec)
INFO:tensorflow:global_step/sec: 333.513
INFO:tensorflow:loss = 1.84054e+09, step = 5401 (0.302 sec)
INFO:tensorflow:global_step/sec: 339.688
INFO:tensorflow:loss = 1.90571e+09, step = 5501 (0.295 sec)
INFO:tensorflow:global_step/sec: 293.103
INFO:tensorflow:loss = 1.9535e+09, step = 5601 (0.340 sec)
INFO:tensorflow:global_step/sec: 338.579
INFO:tensorflow:loss = 1.60929e+09, step = 5701 (0.294 sec)
INFO:tensorflow:global_step/sec: 332.499
INFO:tensorflow:loss = 1.91128e+09, step = 5801 (0.301 sec)
INFO:tensorflow:global_step/sec: 328.285
INFO:tensorflow:loss = 1.79031e+09, step = 5901 (0.305 sec)
INFO:tensorflow:global_step/sec: 339.923
INFO:tensorflow:loss = 2.10594e+09, step = 6001 (0.294 sec)
INFO:tensorflow:global_step/sec: 340.789
INFO:tensorflow:loss = 2.09853e+09, step = 6101 (0.291 sec)
INFO:tensorflow:global_step/sec: 308.106
INFO:tensorflow:loss = 1.98246e+09, step = 6201 (0.326 sec)
INFO:tensorflow:global_step/sec: 342.142
INFO:tensorflow:loss = 1.79039e+09, step = 6301 (0.293 sec)
INFO:tensorflow:global_step/sec: 312.913
INFO:tensorflow:loss = 1.77165e+09, step = 6401 (0.318 sec)
INFO:tensorflow:global_step/sec: 339.047
INFO:tensorflow:loss = 1.80737e+09, step = 6501 (0.297 sec)
INFO:tensorflow:global_step/sec: 319.905
INFO:tensorflow:loss = 2.24852e+09, step = 6601 (0.312 sec)
INFO:tensorflow:global_step/sec: 313.604
INFO:tensorflow:loss = 1.49459e+09, step = 6701 (0.318 sec)
INFO:tensorflow:global_step/sec: 343.657
INFO:tensorflow:loss = 2.29318e+09, step = 6801 (0.293 sec)
INFO:tensorflow:global_step/sec: 335.129
INFO:tensorflow:loss = 1.61947e+09, step = 6901 (0.297 sec)
INFO:tensorflow:global_step/sec: 347.574
INFO:tensorflow:loss = 2.88417e+09, step = 7001 (0.288 sec)
INFO:tensorflow:global_step/sec: 297.682
INFO:tensorflow:loss = 2.54988e+09, step = 7101 (0.337 sec)
INFO:tensorflow:global_step/sec: 321.379
INFO:tensorflow:loss = 1.66533e+09, step = 7201 (0.310 sec)
INFO:tensorflow:global_step/sec: 340.424
INFO:tensorflow:loss = 1.78387e+09, step = 7301 (0.294 sec)
INFO:tensorflow:global_step/sec: 326.855
INFO:tensorflow:loss = 1.94038e+09, step = 7401 (0.306 sec)
INFO:tensorflow:global_step/sec: 295.049
INFO:tensorflow:loss = 1.16564e+09, step = 7501 (0.338 sec)
INFO:tensorflow:global_step/sec: 339.01
INFO:tensorflow:loss = 1.72192e+09, step = 7601 (0.296 sec)
INFO:tensorflow:global_step/sec: 325.301
INFO:tensorflow:loss = 2.12822e+09, step = 7701 (0.307 sec)
INFO:tensorflow:global_step/sec: 325.906
INFO:tensorflow:loss = 1.90154e+09, step = 7801 (0.309 sec)
INFO:tensorflow:global_step/sec: 283.312
INFO:tensorflow:loss = 1.78223e+09, step = 7901 (0.350 sec)
INFO:tensorflow:global_step/sec: 340.24
INFO:tensorflow:loss = 1.66893e+09, step = 8001 (0.295 sec)
###Markdown
7. Visualize/analyze the results of the model
###Code
combined_model.evaluate(input_fn=input_fn_val)
## This returns an iterator
predictions = combined_model.predict(input_fn=input_fn_val)
## Transform the values to a pandas series
predictions = pd.Series([x['predictions'][0] for x in predictions], index=y_val.index)
fig, ax = plt.subplots(figsize=(8,5))
ax.scatter(x=predictions, y=y_val, s=0.5)
ax.set_xlabel('Predicted prices')
ax.set_ylabel('Observed prices')
ax.set_title("Predictions vs. Observed Values in the validation set");
fig, ax = plt.subplots(figsize=(8,5))
ax.hist(abs(y_val-predictions), bins=100, edgecolor='black')
ax.set_xlim(0, 4e3);
###Output
_____no_output_____ |
site/en/guide/migrate/migration_debugging.ipynb | ###Markdown
Unit tests There are a few types of unit testing that can help debug your migration code. 1. Single forward pass validation 2. Model training numerical equivalence validation for a few steps 3. Benchmark inference performance 4. The trained model makes correct predictions on fixed and simple data points You can use `@parameterized.parameters` to test models with different configurations. [Details with code sample](https://github.com/abseil/abseil-py/blob/master/absl/testing/parameterized.py). Note that it's possible to run session APIs and eager execution in the same test case. The code snippets below show how.
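For reference, a minimal sketch of `@parameterized.parameters` (the class name, layer sizes, and activations here are made-up examples):
###Code
from absl.testing import parameterized
import tensorflow as tf

class ModelConfigTest(parameterized.TestCase):

  @parameterized.parameters((8, 'relu'), (16, 'tanh'))
  def test_builds(self, layer_size, activation):
    # each parameter tuple above runs as a separate test case
    layer = tf.keras.layers.Dense(layer_size, activation=activation)
    out = layer(tf.ones((1, 4)))
    self.assertEqual(out.shape, (1, layer_size))
###Output
_____no_output_____
###Markdown
And the test case that mixes session APIs and eager execution: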
###Code
import unittest
class TestNumericalEquivalence(unittest.TestCase):
# copied from code samples above
def setup(self):
# record statistics for 100 training steps
step_num = 100
# setup TF 1 model
random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops')
with random_tool.scope():
# run TF1.x code in graph mode with context management
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
self.model_tf1 = SimpleModelWrapper()
# build the model
inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, params['input_size']))
labels = tf.compat.v1.placeholder(tf.float32, shape=(None, params['num_classes']))
spec = self.model_tf1.model_fn(inputs, labels, tf.estimator.ModeKeys.TRAIN, params)
train_op = spec.train_op
sess.run(tf.compat.v1.global_variables_initializer())
for step in range(step_num):
# log everything and update the model for one step
logs, _ = sess.run(
[self.model_tf1.logged_ops, train_op],
feed_dict={inputs: fake_x, labels: fake_y})
self.model_tf1.update_logs(logs)
# setup TF2 model
random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops')
with random_tool.scope():
self.model_tf2 = SimpleModel(params)
for step in range(step_num):
self.model_tf2.train_step([fake_x, fake_y])
def test_learning_rate(self):
np.testing.assert_allclose(
self.model_tf1.logs['lr'],
self.model_tf2.logs['lr'])
def test_training_loss(self):
# adopt different tolerance strategies before and after 10 steps
first_n_step = 10
    # absolute difference is limited below 1e-5
    # set `equal_nan` to False to detect potential NaN loss issues
    absolute_tolerance = 1e-5
    np.testing.assert_allclose(
        actual=self.model_tf1.logs['loss'][:first_n_step],
        desired=self.model_tf2.logs['loss'][:first_n_step],
        atol=absolute_tolerance,
equal_nan=False)
# relative difference is limited below 5%
relative_tolerance = 0.05
np.testing.assert_allclose(self.model_tf1.logs['loss'][first_n_step:],
self.model_tf2.logs['loss'][first_n_step:],
rtol=relative_tolerance,
equal_nan=False)
###Output
_____no_output_____
###Markdown
Debugging tools tf.print tf.print vs print/logging.info - With configurable arguments, `tf.print` can recursively display the first and last few elements of each dimension for printed tensors. Check the [API docs](https://www.tensorflow.org/api_docs/python/tf/print) for details. - For eager execution, both `print` and `tf.print` print the value of the tensor. But `print` may involve a device-to-host copy, which can potentially slow down your code. - For graph mode, including usage inside `tf.function`, you need to use `tf.print` to print the actual tensor value. `tf.print` is compiled into an op in the graph, whereas `print` and `logging.info` only log at tracing time, which is often not what you want. - `tf.print` also supports printing composite tensors like `tf.RaggedTensor` and `tf.sparse.SparseTensor`. - You can also use a callback to monitor metrics and variables. Please check how to use custom callbacks with [logs dict](https://www.tensorflow.org/guide/keras/custom_callbackusage_of_logs_dict) and [self.model attribute](https://www.tensorflow.org/guide/keras/custom_callbackusage_of_selfmodel_attribute). tf.print vs print inside tf.function
###Code
# `print` prints info of tensor object
# `tf.print` prints the tensor value
@tf.function
def dummy_func(num):
num += 1
print(num)
tf.print(num)
return num
_ = dummy_func(tf.constant([1.0]))
# Output:
# Tensor("add:0", shape=(1,), dtype=float32)
# [2]
###Output
_____no_output_____
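###Markdown
A minimal sketch of the callback-based monitoring mentioned above (the class name is made up; `epoch`, `logs`, and `self.model` are the standard Keras callback hooks):
###Code
class MetricLogger(tf.keras.callbacks.Callback):
  def on_epoch_end(self, epoch, logs=None):
    # `logs` holds this epoch's metrics; `self.model` is the attached model
    tf.print('epoch', epoch, 'metrics:', logs)

# attach it during training, e.g. model.fit(x, y, callbacks=[MetricLogger()])
###Output
_____no_output_____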
###Markdown
tf.distribute.Strategy - If the `tf.function` containing `tf.print` is executed on the workers, for example when using `TPUStrategy` or `ParameterServerStrategy`, you need to check the worker/parameter server logs to find the printed values. - For `print` or `logging.info`, logs will be printed on the coordinator when using `ParameterServerStrategy`, and logs will be printed on the STDOUT of worker0 when using TPUs. tf.keras.Model - When using Sequential and Functional API models, if you want to print values, e.g., model inputs or intermediate features after some layers, you have the following options. 1. [Write a custom layer](https://www.tensorflow.org/guide/keras/custom_layers_and_models) that `tf.print`s the inputs. 2. Include the intermediate outputs you want to inspect in the model outputs. - `tf.keras.layers.Lambda` layers have (de)serialization limitations. To avoid checkpoint loading issues, write a custom subclassed layer instead. Check the [API docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda) for more details. - You can't `tf.print` intermediate outputs in a `tf.keras.callbacks.LambdaCallback` if you don't have access to the actual values, but instead only to the symbolic Keras tensor objects. Option 1: write a custom layer
###Code
class PrintLayer(tf.keras.layers.Layer):
def call(self, inputs):
tf.print(inputs)
return inputs
def get_model():
inputs = tf.keras.layers.Input(shape=(1,))
out_1 = tf.keras.layers.Dense(4)(inputs)
out_2 = tf.keras.layers.Dense(1)(out_1)
# use custom layer to tf.print intermediate features
out_3 = PrintLayer()(out_2)
model = tf.keras.Model(inputs=inputs, outputs=out_3)
return model
model = get_model()
model.compile(optimizer="adam", loss="mse")
model.fit([1, 2, 3], [0.0, 0.0, 1.0])
###Output
_____no_output_____
###Markdown
Option 2: include the intermediate outputs you want to inspect in the model outputs.Note that in such case, you may need some [customizations](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit) to use `Model.fit`.
###Code
def get_model():
inputs = tf.keras.layers.Input(shape=(1,))
out_1 = tf.keras.layers.Dense(4)(inputs)
out_2 = tf.keras.layers.Dense(1)(out_1)
# include intermediate values in model outputs
model = tf.keras.Model(
inputs=inputs,
outputs={
'inputs': inputs,
'out_1': out_1,
'out_2': out_2})
return model
###Output
_____no_output_____
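###Markdown
A quick sketch of inspecting those intermediate outputs (hypothetical usage; a functional model with dict outputs returns a dict when called directly):
###Code
model = get_model()
outs = model(tf.ones((2, 1)))
tf.print(outs['out_1'])  # intermediate features of the first Dense layer
###Output
_____no_output_____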
###Markdown
Copyright 2021 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Debug TF2 Migrated Training Pipeline This notebook demonstrates how to debug a training pipeline when migrating to TF2. It consists of the following components: 1. Suggested steps and code samples for debugging the training pipeline 2. Tools for debugging 3. Other related resources One assumption is you have TF1.x code and trained models for comparison, and you want to build a TF2 model that achieves similar validation accuracy. This notebook does **NOT** cover debugging performance issues for training/inference speed or memory usage. Debugging workflow Below is a general workflow for debugging your TF2 training pipelines. Note that you do not need to follow these steps in order. You can also use a binary search approach where you test the model in an intermediate step and narrow down the debugging scope. 1. Fix compile and runtime errors 2. Single forward pass validation (in a separate [guide](./validate_correctness.ipynb)) a. On single CPU device * Verify variables are created only once * Check variable counts, names, and shapes match * Reset all variables, check numerical equivalence with all randomness disabled * Align random number generation, check numerical equivalence in inference * (Optional) Check checkpoints are loaded properly and TF1.x/TF2 models generate identical output b. On single GPU/TPU device c. With multi-device strategies 3. Model training numerical equivalence validation for a few steps (code samples available below) a. Single training step validation using small and fixed data on single CPU device. Specifically, check numerical equivalence for the following components * losses computation * metrics * learning rate * gradient computation and update b. Check statistics after training 3 or more steps to verify optimizer behaviors like the momentum, still with fixed data on single CPU device c. On single GPU/TPU device d. With multi-device strategies (check the intro for [MultiProcessRunner](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/distribute/multi_process_runner.pyL108) at the bottom) 4. End-to-end convergence testing on real dataset a. Check training behaviors with TensorBoard * use simple optimizers e.g. SGD and simple distribution strategies e.g. `tf.distribute.OneDeviceStrategy` first * training metrics * evaluation metrics * figure out what the reasonable tolerance for inherent randomness is b. Check equivalence with advanced optimizer/learning rate scheduler/distribution strategies c. Check equivalence when using mixed precision 5. Additional product benchmarks Setup
###Code
!pip uninstall -y -q tensorflow
# Install tf-nightly as the DeterministicRandomTestTool is only available in
# Tensorflow 2.8
!pip install -q tf-nightly
###Output
_____no_output_____
###Markdown
Single forward pass validation Single forward pass validation, including checkpoint loading, is covered in a different [colab](./validate_correctness.ipynb).
###Code
import sys
import unittest
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as v1
###Output
_____no_output_____
###Markdown
Model training numerical equivalence validation for a few steps Set up model configuration and prepare a fake dataset.
###Code
params = {
'input_size': 3,
'num_classes': 3,
'layer_1_size': 2,
'layer_2_size': 2,
'num_train_steps': 100,
'init_lr': 1e-3,
'end_lr': 0.0,
'decay_steps': 1000,
'lr_power': 1.0,
}
# make a small fixed dataset
fake_x = np.ones((2, params['input_size']), dtype=np.float32)
fake_y = np.zeros((2, params['num_classes']), dtype=np.int32)
fake_y[0][0] = 1
fake_y[1][1] = 1
step_num = 3
###Output
_____no_output_____
###Markdown
Define the TF1.x model.
###Code
# Assume there is an existing TF1.x model using estimator API
# Wrap the model_fn to log necessary tensors for result comparison
class SimpleModelWrapper():
def __init__(self):
self.logged_ops = {}
self.logs = {
'step': [],
'lr': [],
'loss': [],
'grads_and_vars': [],
'layer_out': []}
def model_fn(self, features, labels, mode, params):
out_1 = tf.compat.v1.layers.dense(features, units=params['layer_1_size'])
out_2 = tf.compat.v1.layers.dense(out_1, units=params['layer_2_size'])
logits = tf.compat.v1.layers.dense(out_2, units=params['num_classes'])
loss = tf.compat.v1.losses.softmax_cross_entropy(labels, logits)
# skip EstimatorSpec details for prediction and evaluation
if mode == tf.estimator.ModeKeys.PREDICT:
pass
if mode == tf.estimator.ModeKeys.EVAL:
pass
assert mode == tf.estimator.ModeKeys.TRAIN
global_step = tf.compat.v1.train.get_or_create_global_step()
lr = tf.compat.v1.train.polynomial_decay(
learning_rate=params['init_lr'],
global_step=global_step,
decay_steps=params['decay_steps'],
end_learning_rate=params['end_lr'],
power=params['lr_power'])
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(lr)
    grads_and_vars = optimizer.compute_gradients(
        loss=loss,
        var_list=graph.get_collection(
            tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES))
    train_op = optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
# log tensors
self.logged_ops['step'] = global_step
self.logged_ops['lr'] = lr
self.logged_ops['loss'] = loss
self.logged_ops['grads_and_vars'] = grads_and_vars
self.logged_ops['layer_out'] = {
'layer_1': out_1,
'layer_2': out_2,
'logits': logits}
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def update_logs(self, logs):
for key in logs.keys():
model_tf1.logs[key].append(logs[key])
###Output
_____no_output_____
###Markdown
The following [`v1.keras.utils.DeterministicRandomTestTool`](https://www.tensorflow.org/api_docs/python/tf/compat/v1/keras/utils/DeterministicRandomTestTool) class provides a context manager `scope()` that can make stateful random operations use the same seed across both TF1 graphs/sessions and eager execution. The tool provides two testing modes: 1. `constant`, which uses the same seed for every single operation no matter how many times it has been called, and 2. `num_random_ops`, which uses the number of previously-observed stateful random operations as the operation seed. This applies both to the stateful random operations used for creating and initializing variables, and to the stateful random operations used in computation (such as for dropout layers).
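For intuition, here is a tiny sketch of the `constant` mode (under it, every stateful random op reuses the same seed, so repeated draws match):
###Code
demo_tool = v1.keras.utils.DeterministicRandomTestTool(mode='constant')
with demo_tool.scope():
  a = tf.random.uniform(shape=(3,))
  b = tf.random.uniform(shape=(3,))
  # in 'constant' mode, a and b hold identical values
###Output
_____no_output_____
###Markdown
The equivalence checks below use the `num_random_ops` mode: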
###Code
random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops')
###Output
_____no_output_____
###Markdown
Run the TF1.x model in graph mode. Collect statistics for first 3 training steps for numerical equivalence comparison.
###Code
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
model_tf1 = SimpleModelWrapper()
# build the model
inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, params['input_size']))
labels = tf.compat.v1.placeholder(tf.float32, shape=(None, params['num_classes']))
spec = model_tf1.model_fn(inputs, labels, tf.estimator.ModeKeys.TRAIN, params)
train_op = spec.train_op
sess.run(tf.compat.v1.global_variables_initializer())
for step in range(step_num):
# log everything and update the model for one step
logs, _ = sess.run(
[model_tf1.logged_ops, train_op],
feed_dict={inputs: fake_x, labels: fake_y})
model_tf1.update_logs(logs)
###Output
_____no_output_____
###Markdown
Define the TF2 model.
###Code
class SimpleModel(tf.keras.Model):
def __init__(self, params, *args, **kwargs):
super(SimpleModel, self).__init__(*args, **kwargs)
# define the model
self.dense_1 = tf.keras.layers.Dense(params['layer_1_size'])
self.dense_2 = tf.keras.layers.Dense(params['layer_2_size'])
self.out = tf.keras.layers.Dense(params['num_classes'])
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=params['init_lr'],
decay_steps=params['decay_steps'],
end_learning_rate=params['end_lr'],
power=params['lr_power'])
self.optimizer = tf.keras.optimizers.SGD(learning_rate_fn)
self.compiled_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
self.logs = {
'lr': [],
'loss': [],
'grads': [],
'weights': [],
'layer_out': []}
def call(self, inputs):
out_1 = self.dense_1(inputs)
out_2 = self.dense_2(out_1)
logits = self.out(out_2)
# log output features for every layer for comparison
layer_wise_out = {
'layer_1': out_1,
'layer_2': out_2,
'logits': logits}
self.logs['layer_out'].append(layer_wise_out)
return logits
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
logits = self(x)
loss = self.compiled_loss(y, logits)
grads = tape.gradient(loss, self.trainable_weights)
# log training statistics
step = self.optimizer.iterations.numpy()
self.logs['lr'].append(self.optimizer.learning_rate(step).numpy())
self.logs['loss'].append(loss.numpy())
self.logs['grads'].append(grads)
self.logs['weights'].append(self.trainable_weights)
# update model
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return
###Output
_____no_output_____
###Markdown
Run the TF2 model in eager mode. Collect statistics for first 3 training steps for numerical equivalence comparison.
###Code
random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops')
with random_tool.scope():
model_tf2 = SimpleModel(params)
for step in range(step_num):
model_tf2.train_step([fake_x, fake_y])
###Output
_____no_output_____
###Markdown
Compare numerical equivalence for first few training steps.You can also check the [Validating correctness & numerical equivalence notebook](./validate_correctness.ipynb) for additional advice for numerical equivalence.
###Code
np.testing.assert_allclose(model_tf1.logs['lr'], model_tf2.logs['lr'])
np.testing.assert_allclose(model_tf1.logs['loss'], model_tf2.logs['loss'])
for step in range(step_num):
for name in model_tf1.logs['layer_out'][step]:
np.testing.assert_allclose(
model_tf1.logs['layer_out'][step][name],
model_tf2.logs['layer_out'][step][name])
###Output
_____no_output_____
###Markdown
Copyright 2021 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Debug TF2 Migrated Training Pipeline This notebook demonstrates how to debug a training pipeline when migrating to TF2. It consists of the following components: 1. Suggested steps and code samples for debugging the training pipeline 2. Tools for debugging 3. Other related resources One assumption is you have TF1.x code and trained models for comparison, and you want to build a TF2 model that achieves similar validation accuracy. This notebook does **NOT** cover debugging performance issues for training/inference speed or memory usage. Debugging workflow Below is a general workflow for debugging your TF2 training pipelines. Note that you do not need to follow these steps in order. You can also use a binary search approach where you test the model in an intermediate step and narrow down the debugging scope. 1. Fix compile and runtime errors 2. Single forward pass validation (in a separate [guide](./validate_correctness.ipynb)) a. On single CPU device * Verify variables are created only once * Check variable counts, names, and shapes match * Reset all variables, check numerical equivalence with all randomness disabled * Align random number generation, check numerical equivalence in inference * (Optional) Check checkpoints are loaded properly and TF1.x/TF2 models generate identical output b. On single GPU/TPU device c. With multi-device strategies 3. Model training numerical equivalence validation for a few steps (code samples available below) a. Single training step validation using small and fixed data on single CPU device. Specifically, check numerical equivalence for the following components * losses computation * metrics * learning rate * gradient computation and update b. Check statistics after training 3 or more steps to verify optimizer behaviors like the momentum, still with fixed data on single CPU device c. On single GPU/TPU device d. With multi-device strategies (check the intro for [MultiProcessRunner](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/distribute/multi_process_runner.pyL108) at the bottom) 4. End-to-end convergence testing on real dataset a. Check training behaviors with TensorBoard * use simple optimizers e.g. SGD and simple distribution strategies e.g. `tf.distribute.OneDeviceStrategy` first * training metrics * evaluation metrics * figure out what the reasonable tolerance for inherent randomness is b. Check equivalence with advanced optimizer/learning rate scheduler/distribution strategies c. Check equivalence when using mixed precision 5. Additional product benchmarks Single forward pass validation Single forward pass validation, including checkpoint loading, is covered in a different [colab](./validate_correctness.ipynb).
###Code
import sys
import unittest
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as v1
###Output
_____no_output_____
###Markdown
Define a context manager to control random number generation.
###Code
seed_implementation = sys.modules[tf.compat.v1.get_seed.__module__]
class DeterministicTestTool(object):
def __init__(self, seed: int = 42, mode='constant'):
"""Set mode to 'constant' or 'num_random_ops'. Defaults to 'constant'."""
if mode not in {'constant', 'num_random_ops'}:
raise ValueError("Mode arg must be 'constant' or 'num_random_ops'. " +
"Got: {}".format(mode))
self._mode = mode
self._seed = seed
self.operation_seed = 0
self._observed_seeds = set()
def scope(self):
tf.random.set_seed(self._seed)
def _get_seed(_):
"""Wraps TF get_seed to make deterministic random generation easier.
This makes a variable's initialization (and calls that involve random
number generation) depend only on how many random number generations
were used in the scope so far, rather than on how many unrelated
operations the graph contains.
Returns:
Random seed tuple.
"""
op_seed = self.operation_seed
if self._mode == "constant":
tf.random.set_seed(op_seed)
else:
if op_seed in self._observed_seeds:
raise ValueError(
'This `DeterministicTestTool` object is trying to re-use the ' +
'already-used operation seed {}. '.format(op_seed) +
'It cannot guarantee random numbers will match between eager ' +
'and sessions when an operation seed is reused. ' +
'You most likely set ' +
'`operation_seed` explicitly but used a value that caused the ' +
'naturally-incrementing operation seed sequences to overlap ' +
'with an already-used seed.')
self._observed_seeds.add(op_seed)
self.operation_seed += 1
return (self._seed, op_seed)
# mock.patch internal symbols to modify the behavior of TF APIs relying on them
return unittest.mock.patch.object(seed_implementation, 'get_seed', wraps=_get_seed)
###Output
_____no_output_____
###Markdown
Model training numerical equivalence validation for a few steps Set up model configuration and prepare a fake dataset.
###Code
params = {
'input_size': 3,
'num_classes': 3,
'layer_1_size': 2,
'layer_2_size': 2,
'num_train_steps': 100,
'init_lr': 1e-3,
'end_lr': 0.0,
'decay_steps': 1000,
'lr_power': 1.0,
}
# make a small fixed dataset
fake_x = np.ones((2, params['input_size']), dtype=np.float32)
fake_y = np.zeros((2, params['num_classes']), dtype=np.int32)
fake_y[0][0] = 1
fake_y[1][1] = 1
step_num = 3
###Output
_____no_output_____
###Markdown
Define the TF1.x model.
###Code
# Assume there is an existing TF1.x model using estimator API
# Wrap the model_fn to log necessary tensors for result comparison
class SimpleModelWrapper():
def __init__(self):
self.logged_ops = {}
self.logs = {
'step': [],
'lr': [],
'loss': [],
'grads_and_vars': [],
'layer_out': []}
def model_fn(self, features, labels, mode, params):
out_1 = tf.compat.v1.layers.dense(features, units=params['layer_1_size'])
out_2 = tf.compat.v1.layers.dense(out_1, units=params['layer_2_size'])
logits = tf.compat.v1.layers.dense(out_2, units=params['num_classes'])
loss = tf.compat.v1.losses.softmax_cross_entropy(labels, logits)
# skip EstimatorSpec details for prediction and evaluation
if mode == tf.estimator.ModeKeys.PREDICT:
pass
if mode == tf.estimator.ModeKeys.EVAL:
pass
assert mode == tf.estimator.ModeKeys.TRAIN
global_step = tf.compat.v1.train.get_or_create_global_step()
lr = tf.compat.v1.train.polynomial_decay(
learning_rate=params['init_lr'],
global_step=global_step,
decay_steps=params['decay_steps'],
end_learning_rate=params['end_lr'],
power=params['lr_power'])
optimizer = tf.compat.v1.train.GradientDescentOptimizer(lr)
grads_and_vars = optimizer.compute_gradients(
loss=loss,
var_list=graph.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES))
train_op = optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
# log tensors
self.logged_ops['step'] = global_step
self.logged_ops['lr'] = lr
self.logged_ops['loss'] = loss
self.logged_ops['grads_and_vars'] = grads_and_vars
self.logged_ops['layer_out'] = {
'layer_1': out_1,
'layer_2': out_2,
'logits': logits}
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def update_logs(self, logs):
for key in logs.keys():
self.logs[key].append(logs[key])
###Output
_____no_output_____
###Markdown
Run the TF1.x model in graph mode. Collect statistics for the first 3 training steps for numerical equivalence comparison.
###Code
random_tool = DeterministicTestTool(mode='num_random_ops')
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
model_tf1 = SimpleModelWrapper()
# build the model
inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, params['input_size']))
labels = tf.compat.v1.placeholder(tf.float32, shape=(None, params['num_classes']))
spec = model_tf1.model_fn(inputs, labels, tf.estimator.ModeKeys.TRAIN, params)
train_op = spec.train_op
sess.run(tf.compat.v1.global_variables_initializer())
for step in range(step_num):
# log everything and update the model for one step
logs, _ = sess.run(
[model_tf1.logged_ops, train_op],
feed_dict={inputs: fake_x, labels: fake_y})
model_tf1.update_logs(logs)
###Output
_____no_output_____
###Markdown
Define the TF2 model.
###Code
class SimpleModel(tf.keras.Model):
def __init__(self, params, *args, **kwargs):
super(SimpleModel, self).__init__(*args, **kwargs)
# define the model
self.dense_1 = tf.keras.layers.Dense(params['layer_1_size'])
self.dense_2 = tf.keras.layers.Dense(params['layer_2_size'])
self.out = tf.keras.layers.Dense(params['num_classes'])
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=params['init_lr'],
decay_steps=params['decay_steps'],
end_learning_rate=params['end_lr'],
power=params['lr_power'])
self.optimizer = tf.keras.optimizers.SGD(learning_rate_fn)
self.compiled_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
self.logs = {
'lr': [],
'loss': [],
'grads': [],
'weights': [],
'layer_out': []}
def call(self, inputs):
out_1 = self.dense_1(inputs)
out_2 = self.dense_2(out_1)
logits = self.out(out_2)
# log output features for every layer for comparison
layer_wise_out = {
'layer_1': out_1,
'layer_2': out_2,
'logits': logits}
self.logs['layer_out'].append(layer_wise_out)
return logits
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
logits = self(x)
loss = self.compiled_loss(y, logits)
grads = tape.gradient(loss, self.trainable_weights)
# log training statistics
step = self.optimizer.iterations.numpy()
self.logs['lr'].append(self.optimizer.learning_rate(step).numpy())
self.logs['loss'].append(loss.numpy())
self.logs['grads'].append(grads)
self.logs['weights'].append(self.trainable_weights)
# update model
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return
###Output
_____no_output_____
###Markdown
Run the TF2 model in eager mode. Collect statistics for the first 3 training steps for numerical equivalence comparison.
###Code
random_tool = DeterministicTestTool(mode='num_random_ops')
with random_tool.scope():
model_tf2 = SimpleModel(params)
for step in range(step_num):
model_tf2.train_step([fake_x, fake_y])
###Output
_____no_output_____
###Markdown
Compare numerical equivalence for the first few training steps. You can also check the [Validating correctness & numerical equivalence notebook](./validate_correctness.ipynb) for additional advice on numerical equivalence.
###Code
np.testing.assert_allclose(model_tf1.logs['lr'], model_tf2.logs['lr'])
np.testing.assert_allclose(model_tf1.logs['loss'], model_tf2.logs['loss'])
for step in range(step_num):
for name in model_tf1.logs['layer_out'][step]:
np.testing.assert_allclose(
model_tf1.logs['layer_out'][step][name],
model_tf2.logs['layer_out'][step][name])
###Output
_____no_output_____
###Markdown
Unit tests

There are a few types of unit testing that can help debug your migration code.
1. Single forward pass validation
2. Model training numerical equivalence validation for a few steps
3. Benchmark inference performance
4. The trained model makes correct predictions on fixed and simple data points

You can use `@parameterized.parameters` to test models with different configurations ([details with code sample](https://github.com/abseil/abseil-py/blob/master/absl/testing/parameterized.py)); a short sketch follows the test class below.

Note that it's possible to run session APIs and eager execution in the same test case. The code snippets below show how.
###Code
import unittest
class TestNumericalEquivalence(unittest.TestCase):
# copied from code samples above
def setUp(self):
# record statistics for 100 training steps
step_num = 100
# setup TF 1 model
random_tool = DeterministicTestTool(mode='num_random_ops')
with random_tool.scope():
# run TF1.x code in graph mode with context management
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
self.model_tf1 = SimpleModelWrapper()
# build the model
inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, params['input_size']))
labels = tf.compat.v1.placeholder(tf.float32, shape=(None, params['num_classes']))
spec = self.model_tf1.model_fn(inputs, labels, tf.estimator.ModeKeys.TRAIN, params)
train_op = spec.train_op
sess.run(tf.compat.v1.global_variables_initializer())
for step in range(step_num):
# log everything and update the model for one step
logs, _ = sess.run(
[self.model_tf1.logged_ops, train_op],
feed_dict={inputs: fake_x, labels: fake_y})
self.model_tf1.update_logs(logs)
# setup TF2 model
random_tool = DeterministicTestTool(mode='num_random_ops')
with random_tool.scope():
self.model_tf2 = SimpleModel(params)
for step in range(step_num):
self.model_tf2.train_step([fake_x, fake_y])
def test_learning_rate(self):
np.testing.assert_allclose(
self.model_tf1.logs['lr'],
self.model_tf2.logs['lr'])
def test_training_loss(self):
# adopt different tolerance strategies before and after 10 steps
first_n_step = 10
# absolute difference is limited below 1e-5
# set `equal_nan` to be False to detect potential NaN loss issues
absolute_tolerance = 1e-5
np.testing.assert_allclose(
actual=self.model_tf1.logs['loss'][:first_n_step],
desired=self.model_tf2.logs['loss'][:first_n_step],
atol=absolute_tolerance,
equal_nan=False)
# relative difference is limited below 5%
relative_tolerance = 0.05
np.testing.assert_allclose(self.model_tf1.logs['loss'][first_n_step:],
self.model_tf2.logs['loss'][first_n_step:],
rtol=relative_tolerance,
equal_nan=False)
###Output
_____no_output_____
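###Markdown
Below is a minimal sketch of `@parameterized.parameters` (an illustration, not part of the original tests): each argument tuple generates a separate test case, so the same assertion can run under several tolerances. The tolerance values here are arbitrary assumptions.
###Code
from absl.testing import parameterized

class ToleranceTest(parameterized.TestCase):

  @parameterized.parameters((1e-5,), (1e-4,))
  def test_loss_tolerance(self, atol):
    # one generated test case per tuple above, each with its own `atol`
    np.testing.assert_allclose(model_tf1.logs['loss'][:10],
                               model_tf2.logs['loss'][:10],
                               atol=atol)
###Output
_____no_output_____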
###Markdown
Debugging tools

tf.print

tf.print vs print/logging.info

- With configurable arguments, `tf.print` can recursively display the first and last few elements of each dimension for printed tensors. Check the [API docs](https://www.tensorflow.org/api_docs/python/tf/print) for details.
- For eager execution, both `print` and `tf.print` print the value of the tensor. But `print` may involve a device-to-host copy, which can potentially slow down your code.
- For graph mode, including usage inside `tf.function`, you need to use `tf.print` to print the actual tensor value. `tf.print` is compiled into an op in the graph, whereas `print` and `logging.info` only log at tracing time, which is often not what you want.
- `tf.print` also supports printing composite tensors like `tf.RaggedTensor` and `tf.sparse.SparseTensor`.
- You can also use a callback to monitor metrics and variables. Please check how to use custom callbacks with the [logs dict](https://www.tensorflow.org/guide/keras/custom_callback#usage_of_logs_dict) and the [self.model attribute](https://www.tensorflow.org/guide/keras/custom_callback#usage_of_selfmodel_attribute).

tf.print vs print inside tf.function
###Code
# `print` prints info of tensor object
# `tf.print` prints the tensor value
@tf.function
def dummy_func(num):
num += 1
print(num)
tf.print(num)
return num
_ = dummy_func(tf.constant([1.0]))
# Output:
# Tensor("add:0", shape=(1,), dtype=float32)
# [2]
###Output
_____no_output_____
###Markdown
tf.distribute.Strategy

- If the `tf.function` containing `tf.print` is executed on the workers, for example when using `TPUStrategy` or `ParameterServerStrategy`, you need to check the worker/parameter server logs to find the printed values.
- For `print` or `logging.info`, logs will be printed on the coordinator when using `ParameterServerStrategy`, and to STDOUT on worker0 when using TPUs.

tf.keras.Model

- When using Sequential and Functional API models, if you want to print values, e.g., model inputs or intermediate features after some layers, you have the following options.
  1. [Write a custom layer](https://www.tensorflow.org/guide/keras/custom_layers_and_models) that applies `tf.print` to its inputs.
  2. Include the intermediate outputs you want to inspect in the model outputs.
- `tf.keras.layers.Lambda` layers have (de)serialization limitations. To avoid checkpoint loading issues, write a custom subclassed layer instead. Check the [API docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda) for more details.
- You can't `tf.print` intermediate outputs in a `tf.keras.callbacks.LambdaCallback`, because you don't have access to the actual values, only to the symbolic Keras tensor objects.

Option 1: write a custom layer
###Code
class PrintLayer(tf.keras.layers.Layer):
def call(self, inputs):
tf.print(inputs)
return inputs
def get_model():
inputs = tf.keras.layers.Input(shape=(1,))
out_1 = tf.keras.layers.Dense(4)(inputs)
out_2 = tf.keras.layers.Dense(1)(out_1)
# use custom layer to tf.print intermediate features
out_3 = PrintLayer()(out_2)
model = tf.keras.Model(inputs=inputs, outputs=out_3)
return model
model = get_model()
model.compile(optimizer="adam", loss="mse")
model.fit([1, 2, 3], [0.0, 0.0, 1.0])
###Output
_____no_output_____
###Markdown
Option 2: include the intermediate outputs you want to inspect in the model outputs. Note that in such a case, you may need some [customizations](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit) to use `Model.fit`; a sketch of one such customization is appended to the cell below.
###Code
def get_model():
inputs = tf.keras.layers.Input(shape=(1,))
out_1 = tf.keras.layers.Dense(4)(inputs)
out_2 = tf.keras.layers.Dense(1)(out_1)
# include intermediate values in model outputs
model = tf.keras.Model(
inputs=inputs,
outputs={
'inputs': inputs,
'out_1': out_1,
'out_2': out_2})
return model
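
# A sketch of the kind of `Model.fit` customization mentioned above (an
# assumption, not code from the original guide): with dict outputs, override
# `train_step` so the loss is computed only on the final layer's output.
class InspectableModel(tf.keras.Model):
    def train_step(self, data):
        x, y = data
        with tf.GradientTape() as tape:
            outputs = self(x, training=True)  # dict: inputs / out_1 / out_2
            loss = self.compiled_loss(y, outputs['out_2'])
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return {'loss': loss}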
###Output
_____no_output_____ |
what_is_cooking.ipynb | ###Markdown
What's cooking competition

**The competition** asks you to predict the category of a dish's cuisine given a list of its ingredients. In the dataset, we include the **recipe id**, **the type of cuisine**, and **the list of ingredients of each recipe** (of variable length). The data is stored in JSON format. An example of a recipe node in train.json:
```json
{
  "id": 24717,
  "cuisine": "indian",
  "ingredients": [
    "tumeric",
    "vegetable stock",
    "tomatoes",
    "garam masala",
    "naan",
    "red lentils",
    "red chili peppers",
    "onions",
    "spinach",
    "sweet potatoes"
  ]
}
```
In the test file **test.json**, the format of a recipe is the same as in **train.json**, only *the cuisine type* is removed, as it is the target variable you are going to predict.

File descriptions:
**train.json** - the training set containing recipe ids, types of cuisine, and lists of ingredients
**test.json** - the test set containing recipe ids and lists of ingredients
**sample_submission.csv** - a sample submission file in the correct format

-------
###Code
# include required libraries
import os
import sys
import data_utils as du
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
# Read the data from files
trainingData = du.readJson('./data/train.json')
testData = du.readJson('./data/test.json')
###Output
_____no_output_____
###Markdown
Exploring first few rows of each data set
###Code
print('TRAINING DATA:')
trainingData.head()
print('TEST DATA:')
testData.head()
###Output
TEST DATA:
###Markdown
Make a Distribution map for ingredients and food types in the training set
###Code
ingredientsColumn = trainingData['ingredients']
recipeType = trainingData['cuisine']
ingredientsDist = du.dataDistributionMap(ingredientsColumn)
cuisineDist = du.dataDistributionMap(recipeType)
print ('Number of cuisines in the data set:',len(cuisineDist))
print ('Number of ingredients in the data set:',len(ingredientsDist))
###Output
<class 'list'>
<class 'str'>
Number of cuisines in the data set: 20
Number of ingredients in the data set: 6714
###Markdown
-------

Prepare the data to be used as input to a neural network, and create training and dev datasets

1. We need to create input vectors representing each **recipe** and output vectors representing a **cuisine type**.
2. Create a basic neural network architecture using TensorFlow and Keras, and train the network with the training data (experiment).
3. There is a gap between training and test accuracy (95.65% vs 53.46%); some regularization techniques may help (a Dropout sketch is appended to the model cell below).

Prepare the input matrix:
###Code
# create a map of ingredients
w2i, i2w = du.wordsToMap(list(ingredientsDist.keys()))
max(len(recipe) for recipe in trainingData['ingredients'])
# An input matrix with having the ingredients encoded for each recipe and zero padding.
# Each row is a recipe in the training set and will be the input of the first layer of NN
X = du.convertToInputMatrix(trainingData['ingredients'], w2i)
###Output
_____no_output_____
###Markdown
Sanity Check:
###Code
X[3]
trainingData['ingredients'][0]
print(i2w[0],"\n",i2w[1], "\n",i2w[2], "\n",i2w[3])
###Output
thai basil
rice paddy herb
crimini mushrooms
part-skim mozzarella
###Markdown
Prepare the expected output matrix:
###Code
# create a map of index to cuisine nad cuisine to index
c2i, i2c = du.wordsToMap(list(cuisineDist))
Y = du.convertToOutputMatrix(trainingData['cuisine'], c2i)
###Output
_____no_output_____
###Markdown
Sanity Check:
###Code
Y[0:20]
trainingData['cuisine'][1]
Y[1]
print(c2i['southern_us'], i2c[1])
# split into 85% for train and 15% for test
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
###Output
_____no_output_____
###Markdown
Build the model:
###Code
# create model
model = Sequential()
model.add(Dense(32, input_dim=6714, activation='relu', kernel_initializer="uniform"))
model.add(Dense(44, activation='tanh', kernel_initializer="uniform"))
model.add(Dense(32, activation='tanh', kernel_initializer="uniform"))
model.add(Dense(32, activation='tanh', kernel_initializer="uniform"))
model.add(Dense(25, activation='tanh', kernel_initializer="uniform"))
model.add(Dense(20, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(X_train, Y_train, epochs=150, batch_size=500)
# evaluate the model
scores = model.evaluate(X_test, Y_test)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
###Output
5967/5967 [==============================] - 0s 82us/step
acc: 53.46%
|
N_Hidden_Layers.ipynb | ###Markdown
Setting up the Environment
###Code
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Getting the Data
###Code
data = pd.read_csv("./train.csv")
data = data.sample(frac = 1).reset_index(drop = True)  # shuffle and keep the result
data.head()
print("No of Examples: ",data.shape[0])
###Output
No of Examples: 42000
###Markdown
Splitting the Data into Train and Validation
###Code
train_data = data.iloc[:33600]
valid_data = data.iloc[33600:]
print(train_data.shape)
print(valid_data.shape)
X_train = train_data.iloc[:,1:].reset_index(drop = True)
y_train = train_data.iloc[:,0].reset_index(drop = True)
X_valid = valid_data.iloc[:,1:].reset_index(drop = True)
y_valid = valid_data.iloc[:,0].reset_index(drop = True)
print("X_train : ", X_train.shape)
print("y_train : ", y_train.shape)
print("X_valid : ", X_valid.shape)
print("y_valid : ", y_valid.shape)
###Output
X_train : (33600, 784)
y_train : (33600,)
X_valid : (8400, 784)
y_valid : (8400,)
###Markdown
Getting the Train Input and Labels into Shape
###Code
X_train = X_train.to_numpy().transpose()
# One Hot Encoding the Labels
label_mat = np.zeros((10, y_train.shape[0]))
for i in range(y_train.shape[0]):
label_mat[y_train[i]][i] = 1
y_train_onehot = label_mat
print("X_train : ", X_train.shape)
print("y_train : ", y_train_onehot.shape)
###Output
X_train : (784, 33600)
y_train : (10, 33600)
###Markdown
Rescaling the Train Input
###Code
X_train = X_train / 255
###Output
_____no_output_____
###Markdown
Modelling the Neural Network and Functions
###Code
def sigmoid(x):
return 1.0/(1+ np.exp(-x))
def sigmoid_derivative(x):
s = sigmoid(x)
return s * (1.0 - s)
class NeuralNetwork:
def __init__(self, sizes):
np.random.seed(7)
self.num_layers = len(sizes)
self.sizes = sizes
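# biases start at zero; weights use a small scaled Gaussian so the sigmoid
# activations begin near their linear region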
self.biases = np.array([np.zeros((y,1)) for y in sizes[1:]])
self.weights = np.array([np.random.randn(y, x) * 0.05 for x, y in zip(sizes[:-1], sizes[1:])])
def feedforward(self, a):
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def cost_func(self,a2,y):
m = y.shape[1]  # number of examples (avoids relying on a global m)
logloss = np.multiply(np.log(a2),y) + np.multiply((1-y),np.log(1-a2))
cost = -( np.sum(logloss) ) / m
cost = np.squeeze(cost)
return cost
def test(self,t):
test_output = self.feedforward(t)
t_label = test_output.argmax(axis = 0)
return t_label
def cost_derivative(self, output_activations, y):
return (output_activations-y)
def backprop(self,x,y,alpha):
n_b = np.array([np.zeros(b.shape) for b in self.biases])
n_w = np.array([np.zeros(w.shape) for w in self.weights])
m = x.shape[1]
# feedforward pass
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
cost = self.cost_func(activations[-1],y)
delta = self.cost_derivative(activations[-1], y)
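# the line above uses (activation - label) directly: for a sigmoid output
# layer with cross-entropy loss, the output error simplifies to this, so
# no sigmoid_derivative is needed here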
n_b[-1] = (1/m) * delta.sum(axis=1,keepdims = True )
n_w[-1] = (1/m) * np.dot(delta, activations[-2].transpose())
# l=1 means the last layer, l=2 means the second-to-last layer, and so on.
for l in range(2, self.num_layers):
z = zs[-l]
sd = sigmoid_derivative(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sd
n_b[-l] = (1/m) * delta.sum(axis=1, keepdims= True)
n_w[-l] = (1/m) * np.dot(delta, activations[-l-1].transpose())
self.weights = self.weights - alpha * n_w
self.biases = self.biases - alpha * n_b
return cost
###Output
_____no_output_____
###Markdown
Training the Model
###Code
n, m = X_train.shape
print("No of Training Examples: ", m)
print("No of Pixels in each image: ", n)
sizes = [n,500,10] # No of units in each layer
learning_rate = 0.1
epochs = 10000
nn = NeuralNetwork(sizes)
for i in range(epochs):
cost = nn.backprop(X_train, y_train_onehot, learning_rate)
if (i%10 == 0) or (i == epochs-1):
print("Iteration: {0} ---> Cost: {1}".format(i,cost))
###Output
Iteration: 0 ---> Cost: 7.814775643575334
Iteration: 10 ---> Cost: 3.1218717215801046
Iteration: 20 ---> Cost: 2.979932475022972
Iteration: 30 ---> Cost: 2.843202139634936
Iteration: 40 ---> Cost: 2.710422967517665
Iteration: 50 ---> Cost: 2.581791068799056
Iteration: 60 ---> Cost: 2.458342814616878
Iteration: 70 ---> Cost: 2.341389246104713
Iteration: 80 ---> Cost: 2.2320924085424063
Iteration: 90 ---> Cost: 2.131221187181326
Iteration: 100 ---> Cost: 2.039070178169018
Iteration: 110 ---> Cost: 1.95549889096356
Iteration: 120 ---> Cost: 1.8800390182934674
Iteration: 130 ---> Cost: 1.8120210185758678
Iteration: 140 ---> Cost: 1.750686171214641
Iteration: 150 ---> Cost: 1.695268728916242
Iteration: 160 ---> Cost: 1.64504697095832
Iteration: 170 ---> Cost: 1.5993692284153718
Iteration: 180 ---> Cost: 1.5576627849316256
Iteration: 190 ---> Cost: 1.5194325387974248
Iteration: 200 ---> Cost: 1.4842543987014711
Iteration: 210 ---> Cost: 1.4517665821218577
Iteration: 220 ---> Cost: 1.4216606314189375
Iteration: 230 ---> Cost: 1.3936730682883693
Iteration: 240 ---> Cost: 1.3675780686796548
Iteration: 250 ---> Cost: 1.3431812424152625
Iteration: 260 ---> Cost: 1.320314453041385
Iteration: 270 ---> Cost: 1.298831550970643
Iteration: 280 ---> Cost: 1.2786048770096328
Iteration: 290 ---> Cost: 1.2595224003173908
Iteration: 300 ---> Cost: 1.2414853714526062
Iteration: 310 ---> Cost: 1.2244063904068054
Iteration: 320 ---> Cost: 1.208207807952207
Iteration: 330 ---> Cost: 1.1928203948073646
Iteration: 340 ---> Cost: 1.1781822266337199
Iteration: 350 ---> Cost: 1.1642377438214475
Iteration: 360 ---> Cost: 1.1509369537172522
Iteration: 370 ---> Cost: 1.1382347497655807
Iteration: 380 ---> Cost: 1.126090327340911
Iteration: 390 ---> Cost: 1.114466680161272
Iteration: 400 ---> Cost: 1.1033301643572642
Iteration: 410 ---> Cost: 1.0926501197403822
Iteration: 420 ---> Cost: 1.0823985397371843
Iteration: 430 ---> Cost: 1.0725497829619366
Iteration: 440 ---> Cost: 1.0630803205890005
Iteration: 450 ---> Cost: 1.0539685146328281
Iteration: 460 ---> Cost: 1.0451944230047097
Iteration: 470 ---> Cost: 1.0367396278340668
Iteration: 480 ---> Cost: 1.0285870840500506
Iteration: 490 ---> Cost: 1.020720985640578
Iteration: 500 ---> Cost: 1.013126647358769
Iteration: 510 ---> Cost: 1.0057903999447355
Iteration: 520 ---> Cost: 0.998699497184279
Iteration: 530 ---> Cost: 0.9918420333432897
Iteration: 540 ---> Cost: 0.9852068697036999
Iteration: 550 ---> Cost: 0.9787835690887193
Iteration: 560 ---> Cost: 0.9725623374055496
Iteration: 570 ---> Cost: 0.9665339713561141
Iteration: 580 ---> Cost: 0.9606898115729979
Iteration: 590 ---> Cost: 0.9550217005309988
Iteration: 600 ---> Cost: 0.949521944666158
Iteration: 610 ---> Cost: 0.944183280205423
Iteration: 620 ---> Cost: 0.9389988422724365
Iteration: 630 ---> Cost: 0.9339621368895308
Iteration: 640 ---> Cost: 0.9290670155436882
Iteration: 650 ---> Cost: 0.9243076520259613
Iteration: 660 ---> Cost: 0.9196785212902808
Iteration: 670 ---> Cost: 0.9151743801094003
Iteration: 680 ---> Cost: 0.910790249333483
Iteration: 690 ---> Cost: 0.906521397581049
Iteration: 700 ---> Cost: 0.9023633262130782
Iteration: 710 ---> Cost: 0.8983117554594413
Iteration: 720 ---> Cost: 0.8943626115827922
Iteration: 730 ---> Cost: 0.8905120149789693
Iteration: 740 ---> Cost: 0.8867562691250158
Iteration: 750 ---> Cost: 0.8830918502964556
Iteration: 760 ---> Cost: 0.8795153979845575
Iteration: 770 ---> Cost: 0.8760237059522831
Iteration: 780 ---> Cost: 0.8726137138744964
Iteration: 790 ---> Cost: 0.869282499514009
Iteration: 800 ---> Cost: 0.866027271390266
Iteration: 810 ---> Cost: 0.8628453619020293
Iteration: 820 ---> Cost: 0.8597342208693755
Iteration: 830 ---> Cost: 0.8566914094638042
Iteration: 840 ---> Cost: 0.8537145944982839
Iteration: 850 ---> Cost: 0.8508015430517226
Iteration: 860 ---> Cost: 0.8479501174046995
Iteration: 870 ---> Cost: 0.8451582702653473
Iteration: 880 ---> Cost: 0.8424240402661037
Iteration: 890 ---> Cost: 0.8397455477136638
Iteration: 900 ---> Cost: 0.8371209905759016
Iteration: 910 ---> Cost: 0.8345486406908101
Iteration: 920 ---> Cost: 0.8320268401836622
Iteration: 930 ---> Cost: 0.8295539980796153
Iteration: 940 ---> Cost: 0.8271285870999172
Iteration: 950 ---> Cost: 0.8247491406307151
Iteration: 960 ---> Cost: 0.8224142498542235
Iteration: 970 ---> Cost: 0.8201225610327069
Iteration: 980 ---> Cost: 0.8178727729363734
Iteration: 990 ---> Cost: 0.8156636344068362
Iteration: 1000 ---> Cost: 0.8134939420483722
Iteration: 1010 ---> Cost: 0.811362538039655
Iteration: 1020 ---> Cost: 0.8092683080591336
Iteration: 1030 ---> Cost: 0.807210179317629
Iteration: 1040 ---> Cost: 0.8051871186921165
Iteration: 1050 ---> Cost: 0.8031981309550345
Iteration: 1060 ---> Cost: 0.8012422570937826
Iteration: 1070 ---> Cost: 0.7993185727154131
Iteration: 1080 ---> Cost: 0.7974261865317909
Iteration: 1090 ---> Cost: 0.7955642389207906
Iteration: 1100 ---> Cost: 0.7937319005593517
Iteration: 1110 ---> Cost: 0.7919283711244611
Iteration: 1120 ---> Cost: 0.790152878058353
Iteration: 1130 ---> Cost: 0.7884046753944352
Iteration: 1140 ---> Cost: 0.7866830426406514
Iteration: 1150 ---> Cost: 0.784987283717181
Iteration: 1160 ---> Cost: 0.7833167259455431
Iteration: 1170 ---> Cost: 0.7816707190863609
Iteration: 1180 ---> Cost: 0.780048634423171
Iteration: 1190 ---> Cost: 0.7784498638898425
Iteration: 1200 ---> Cost: 0.7768738192392777
Iteration: 1210 ---> Cost: 0.7753199312512234
Iteration: 1220 ---> Cost: 0.7737876489771303
Iteration: 1230 ---> Cost: 0.7722764390201161
Iteration: 1240 ---> Cost: 0.7707857848482086
Iteration: 1250 ---> Cost: 0.769315186139124
Iteration: 1260 ---> Cost: 0.767864158154962
Iteration: 1270 ---> Cost: 0.7664322311452725
Iteration: 1280 ---> Cost: 0.7650189497770293
Iteration: 1290 ---> Cost: 0.763623872590151
Iteration: 1300 ---> Cost: 0.7622465714772602
Iteration: 1310 ---> Cost: 0.7608866311864579
Iteration: 1320 ---> Cost: 0.7595436488459607
Iteration: 1330 ---> Cost: 0.7582172335094938
Iteration: 1340 ---> Cost: 0.7569070057214183
Iteration: 1350 ---> Cost: 0.7556125971006062
Iteration: 1360 ---> Cost: 0.7543336499421438
Iteration: 1370 ---> Cost: 0.7530698168359842
Iteration: 1380 ---> Cost: 0.7518207603017284
Iteration: 1390 ---> Cost: 0.7505861524387486
Iteration: 1400 ---> Cost: 0.7493656745909121
Iteration: 1410 ---> Cost: 0.7481590170252118
Iteration: 1420 ---> Cost: 0.7469658786236328
Iteration: 1430 ---> Cost: 0.7457859665876335
Iteration: 1440 ---> Cost: 0.7446189961546409
Iteration: 1450 ---> Cost: 0.7434646903260027
Iteration: 1460 ---> Cost: 0.7423227796058586
Iteration: 1470 ---> Cost: 0.7411930017504254
Iteration: 1480 ---> Cost: 0.7400751015272171
Iteration: 1490 ---> Cost: 0.7389688304837473
Iteration: 1500 ---> Cost: 0.7378739467252784
Iteration: 1510 ---> Cost: 0.736790214701211
Iteration: 1520 ---> Cost: 0.7357174049997256
Iteration: 1530 ---> Cost: 0.7346552941503076
Iteration: 1540 ---> Cost: 0.7336036644338043
Iteration: 1550 ---> Cost: 0.7325623036996826
Iteration: 1560 ---> Cost: 0.7315310051901721
Iteration: 1570 ---> Cost: 0.7305095673709937
Iteration: 1580 ---> Cost: 0.729497793768385
Iteration: 1590 ---> Cost: 0.7284954928121572
Iteration: 1600 ---> Cost: 0.7275024776845193
Iteration: 1610 ---> Cost: 0.72651856617443
Iteration: 1620 ---> Cost: 0.7255435805372382
Iteration: 1630 ---> Cost: 0.7245773473593948
Iteration: 1640 ---> Cost: 0.7236196974280192
Iteration: 1650 ---> Cost: 0.7226704656051243
Iteration: 1660 ---> Cost: 0.7217294907063018
Iteration: 1670 ---> Cost: 0.7207966153836918
Iteration: 1680 ---> Cost: 0.719871686013054
Iteration: 1690 ---> Cost: 0.7189545525847821
Iteration: 1700 ---> Cost: 0.7180450685986964
Iteration: 1710 ---> Cost: 0.7171430909624675
Iteration: 1720 ---> Cost: 0.7162484798935242
Iteration: 1730 ---> Cost: 0.7153610988243064
Iteration: 1740 ---> Cost: 0.714480814310738
Iteration: 1750 ---> Cost: 0.713607495943784
Iteration: 1760 ---> Cost: 0.7127410162639792
Iteration: 1770 ---> Cost: 0.7118812506788113
Iteration: 1780 ---> Cost: 0.7110280773828472
Iteration: 1790 ---> Cost: 0.710181377280501
Iteration: 1800 ---> Cost: 0.709341033911338
Iteration: 1810 ---> Cost: 0.7085069333778248
###Markdown
Getting the Train Accuracy
###Code
train_preds = nn.test(X_train)
count = 0
for i in range(X_train.shape[1]):
if train_preds[i] == y_train[i]:
count += 1
print("Training Accuracy: ", count / X_train.shape[1])
###Output
Training Accuracy: 0.9507440476190476
###Markdown
Getting the Validation Accuracy
###Code
X_valid = X_valid.to_numpy().transpose()
X_valid = X_valid / 255
print("X_valid: ", X_valid.shape)
print("y_valid: ", y_valid.shape)
valid_preds = nn.test(X_valid)
count = 0
for i in range(X_valid.shape[1]):
if valid_preds[i] == y_valid[i]:
count += 1
print("Validation Accuracy: ", count / X_valid.shape[1])
###Output
Validation Accuracy: 0.945
###Markdown
Getting the Test Data
###Code
test_data = pd.read_csv("./test.csv")
test_data.head()
X_test = test_data.to_numpy().transpose()
X_test = X_test / 255
print("X_test: ", X_test.shape)
test_preds = nn.test(X_test)
test_preds.shape
###Output
_____no_output_____
###Markdown
Creating the Output CSV File
###Code
imageid = np.arange(test_preds.shape[0]) + 1
imageid
df = pd.DataFrame({'ImageId': imageid, 'Label': test_preds})
df.head()
df.to_csv('output_N_Hidden_Layers.csv', index = False)
###Output
_____no_output_____ |
wk12/prophet.ipynb | ###Markdown
Prophet

We'll talk about what Prophet is in some detail momentarily, but first, you're going to want to start the install process in Anaconda Prompt (or if you're on a Mac, you can use Terminal). It's not a fast installation.

To install:

```Python
conda install -c conda-forge fbprophet
```

Note: I also had to `conda install plotly` before my import statement would work, and you might too.

Prophet is a really powerful, if really picky-about-formatting, time series forecasting tool. It's available in both R and Python, though we're only focusing on the latter in this class. It was developed by Facebook, and (Coral's opinion, here, which nobody is required to share) making Prophet open source is the one little bit of social good Facebook has ever been responsible for.

It deals really well with seasonality, including holidays or events that we know about ahead of time (e.g. it deals fine with New Year's or the Super Bowl). It will tolerate a little bit of missing data and, they claim, a few large outliers—but everyone who uses Prophet says "just replace outliers with NAs." And it does not require linearity.

From [Facebook's Python Prophet API documentation](https://facebook.github.io/prophet/docs/quick_start.html#python-api):

> The input to Prophet is always a dataframe with two columns: `ds` and `y`. The `ds` (datestamp) column should be of a format expected by Pandas, ideally `YYYY-MM-DD` for a date or `YYYY-MM-DD HH:MM:SS` for a timestamp. The `y` column must be numeric, and represents the measurement we wish to forecast.
###Code
import pandas as pd
from fbprophet import Prophet
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error, mean_absolute_error
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
###Output
_____no_output_____
###Markdown
Let's look at hourly power consumption from Duquesne Light (via [PJM Interconnection LLC](https://dataminer2.pjm.com/feed/hrl_load_metered)). First, we pull our dataset into a Pandas dataframe:
###Code
df = pd.read_csv('DUQ_hourly.csv', parse_dates=[['datetime_date','datetime_time']])
df.head() # we start on 1/1/2017
df.tail() # we end part-way through 10/2/2020
# now we need to rename columns
# to match Prophet's preferred (required) naming
df = df.rename(columns={"datetime_date_datetime_time": "ds", "mw": "y"})
df = df[["ds", "y"]]
df.head()
df.dtypes
# let's have a look
df.plot(x="ds", y="y", style='.', figsize=(15,5), title='Energy Usage')
pyplot.show()
###Output
_____no_output_____
###Markdown
Now we need to set up our Prophet object. There are a number of parameters we can feed it, but if we don't include any, it will do its best to choose appropriate ones.We can also call `fit` on our dataframe, to fit the model to our dataset. This is where our column names, locations, and date formats all have to be correct.
###Code
m = Prophet()
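# Prophet() with no arguments chooses sensible defaults. Explicit settings are
# also possible, e.g. (illustrative values, not tuned for this dataset):
# m = Prophet(yearly_seasonality=True, weekly_seasonality=True,
#             changepoint_prior_scale=0.05)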
m.fit(df) # this takes a few seconds
print("done")
###Output
C:\Users\csheldon-hess\Anaconda3\lib\site-packages\pystan\misc.py:399: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
elif np.issubdtype(np.asarray(v).dtype, float):
###Markdown
Now, we can have Prophet make a prediction, using the `predict` method. It creates a new dataframe with a prediction for each row in the dataframe we feed it (`yhat`), as well as uncertainty intervals (`yhat_lower` to `yhat_upper`).
###Code
# OK, now let's get 10/2 - 11/8
actuals_df = pd.read_csv('DUQ_hourly_oct_nov_2020.csv', parse_dates=[['datetime_date','datetime_time']])
actuals_df = actuals_df.rename(columns={"datetime_date_datetime_time": "ds", "mw": "y"})
actuals_df.head()
fcst = m.predict(df=actuals_df)
fcst.head()
fcst.tail()
# let's examine our forecast
fig1 = m.plot(fcst)
# and we can look at the components of the forecast
fig1 = m.plot_components(fcst)
###Output
_____no_output_____
###Markdown
OK, let's plot our forecast (green) versus the actual values (black)
###Code
fig1 = pyplot.figure()
fig1.set_figheight(5)
fig1.set_figwidth(15)
ax0 = fig1.add_subplot(121)
ax0.plot_date(x=df["ds"], y=df["y"], fmt="k-")
ax0.plot_date(x=fcst["ds"], y=fcst["yhat"], fmt="g-")
ax0.plot_date(x=actuals_df["ds"], y=actuals_df["y"], fmt="k-")
pyplot.ylim(0,3000)
ax0.xaxis.set_tick_params(rotation=30, labelsize=10)
ax1 = fig1.add_subplot(122)
ax1.plot_date(x=fcst["ds"], y=fcst["yhat"], fmt="g-")
ax1.plot_date(x=actuals_df["ds"], y=actuals_df["y"], fmt="k-")
pyplot.ylim(0,3000)
ax1.xaxis.set_tick_params(rotation=30, labelsize=10)
pyplot.show()
###Output
_____no_output_____
###Markdown
That looks? Pretty good? But perhaps we should measure.
###Code
MSE = mean_squared_error(y_true=actuals_df['y'], y_pred=fcst['yhat'])
print(MSE)
MAE = mean_absolute_error(y_true=actuals_df['y'], y_pred=fcst['yhat'])
print(MAE)
###Output
109.51461086775501
###Markdown
Let's see if we do any better after adding holidays.
###Code
cal = calendar()
train_holidays = cal.holidays(start=df["ds"][0], end=df["ds"][len(df["ds"]) - 1])
test_holidays = cal.holidays(start=actuals_df["ds"][0], end=actuals_df["ds"][len(actuals_df["ds"]) - 1])
# make a df of holidays (ds + column reading "USFederalHoliday")
# code adapted from https://www.kaggle.com/robikscube/time-series-forecasting-with-prophet
# set each date in df to True or False for if it's a holiday
df["is_holiday"] = df.ds.isin([d.date() for d in cal.holidays()])
actuals_df["is_holiday"] = actuals_df.ds.isin([d.date() for d in cal.holidays()])
# get just the rows that are holidays, and stick them in a new df
# (need holidays in both training AND test dfs)
holiday_df = df.loc[df["is_holiday"]]
hol_df2 = actuals_df.loc[actuals_df["is_holiday"]]
# glue those two dfs together
holiday_df = holiday_df.append(hol_df2, ignore_index = True)
# we need each holiday labeled "USFederalHoliday," so we make a column
holiday_df.insert(3, "holiday", "USFederalHoliday")
# then we cut out the columns we no longer need
holiday_df = holiday_df.drop(["y", "is_holiday"], axis=1)
df = df.drop(["is_holiday"], axis = 1)
actuals_df = actuals_df.drop(["is_holiday"], axis = 1)
# make our ds back into a datetime
holiday_df["ds"] = pd.to_datetime(holiday_df["ds"])
holiday_df.head()
mh = Prophet(holidays=holiday_df)
mh.fit(df) # this takes a few seconds
fcst_h = mh.predict(df=actuals_df)
# let's examine our forecast
fig2 = mh.plot(fcst_h)
fig2 = pyplot.figure()
fig2.set_figheight(5)
fig2.set_figwidth(15)
ax0 = fig2.add_subplot(121)
ax0.plot_date(x=df["ds"], y=df["y"], fmt="k-")
ax0.plot_date(x=fcst_h["ds"], y=fcst_h["yhat"], fmt="g-")
ax0.plot_date(x=actuals_df["ds"], y=actuals_df["y"], fmt="k-")
pyplot.ylim(0,3000)
ax0.xaxis.set_tick_params(rotation=30, labelsize=10)
ax1 = fig2.add_subplot(122)
ax1.plot_date(x=fcst_h["ds"], y=fcst_h["yhat"], fmt="g-")
ax1.plot_date(x=actuals_df["ds"], y=actuals_df["y"], fmt="k-")
pyplot.ylim(0,3000)
ax1.xaxis.set_tick_params(rotation=30, labelsize=10)
pyplot.show()
print("MSE, no holidays:", MSE)
print("MAE, no holidays:", MAE)
MSE_h = mean_squared_error(y_true=actuals_df['y'], y_pred=fcst_h['yhat'])
MAE_h = mean_absolute_error(y_true=actuals_df['y'], y_pred=fcst_h['yhat'])
print("MSE, holidays:", MSE_h)
print("MAE, holidays:", MAE_h)
fig2 = mh.plot_components(fcst_h)
###Output
_____no_output_____ |
Copy_of_igbobert5.ipynb | ###Markdown
###Code
!wget -c https://github.com/IgnatiusEzeani/IGBONLP/raw/master/ig_monoling/text.zip
!wget -c https://raw.githubusercontent.com/chiamaka249/lacuna_pos_ner/main/language_corpus/ibo/ibo.txt
# !wget -c https://github.com/chiamaka249/IgboNER/blob/main/config.json
import zipfile
import os
def unzip(zipfilename):
try:
with zipfile.ZipFile(zipfilename, 'r') as zip_ref:
zip_ref.extractall(zipfilename[:-4])
return f"'{zipfilename}' unzipped!"
except FileNotFoundError:
print(f"Cannot find '{zipfilename}' file")
unzip("text.zip")
!rm text.zip
# copy the file "ibo.txt" into the folder "text"
import shutil
shutil.copy('/content/ibo.txt', '/content/text')
dir_name = "/content/text"
text=""
for fname in os.listdir(dir_name):
fname = os.path.join(dir_name, fname)
with open(fname, "r", encoding="utf8") as datafile:
text = text+"\n"+datafile.read()
with open("data.txt", "w", encoding="utf8") as datafile:
datafile.write(text)
shutil.rmtree("text")
# We won't need TensorFlow here
!pip uninstall -y tensorflow
# Install `transformers` from master
!pip install git+https://github.com/huggingface/transformers
!pip list | grep -E 'transformers|tokenizers'
# transformers version at notebook update --- 2.11.0
# tokenizers version at notebook update --- 0.8.0rc1
%%time
from pathlib import Path
from tokenizers import ByteLevelBPETokenizer
paths = [str(x) for x in Path(".").glob("**/*.txt")]
# Initialize a tokenizer
tokenizer = ByteLevelBPETokenizer()
# Customize training
tokenizer.train(files=paths, vocab_size=52_000, min_frequency=2, special_tokens=[
"<s>",
"<pad>",
"</s>",
"<unk>",
"<mask>",
])
!mkdir igbo_bert4
tokenizer.save_model("igbo_bert4")
from tokenizers.implementations import ByteLevelBPETokenizer
from tokenizers.processors import BertProcessing
tokenizer = ByteLevelBPETokenizer(
"./igbo_bert4/vocab.json",
"./igbo_bert4/merges.txt",
)
tokenizer._tokenizer.post_processor = BertProcessing(
("</s>", tokenizer.token_to_id("</s>")),
("<s>", tokenizer.token_to_id("<s>")),
)
tokenizer.enable_truncation(max_length=512)
tokenizer.encode("Simone gara ụka ụnyahụ guọ egwu ma ga-kwa taa.").tokens
# Check that we have a GPU
!nvidia-smi
# Check that PyTorch sees it
import torch
torch.cuda.is_available()
from transformers import RobertaConfig
config = RobertaConfig(
vocab_size=52_000,
max_position_embeddings=514,
num_attention_heads=12,
num_hidden_layers=6,
type_vocab_size=1,
)
#from google.colab import files
#files.upload()
from transformers import RobertaTokenizerFast
tokenizer = RobertaTokenizerFast.from_pretrained("./igbo_bert4", max_len=512)
from transformers import RobertaForMaskedLM
model = RobertaForMaskedLM(config=config)
model.num_parameters()
# => 83 million parameters
%%time
from transformers import LineByLineTextDataset
dataset = LineByLineTextDataset(
tokenizer = tokenizer,
file_path = "/content/data.txt",
block_size = 128
)
from transformers import DataCollatorForLanguageModeling
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=True, mlm_probability=0.15
)
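# mlm=True enables masked-language-model training: 15% of the tokens in each
# batch are masked at random and the model learns to reconstruct them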
from transformers import Trainer, TrainingArguments
training_args = TrainingArguments(
output_dir="./igbo_bert4",
overwrite_output_dir=True,
num_train_epochs=5,
per_gpu_train_batch_size=64,
save_steps=10_000,
save_total_limit=2,
prediction_loss_only=True,
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=dataset,
)
%%time
trainer.train()
trainer.save_model("./igbo_bert4")
from transformers import pipeline
fill_mask = pipeline(
"fill-mask",
model="./igbo_bert4",
tokenizer="./igbo_bert4"
)
# Masked-word predictions on Igbo sentences
# (the expected fill is noted in each trailing comment)
fill_mask("Abụ m Maazị <mask>.") #= okafor/Ọkafọ
fill_mask("Nwaanyị na <mask> ji na akara.") #= eri
fill_mask("Chineke ga- ebibikwa ndị niile na- eme ihe <mask>.") #= ọjọọ
fill_mask("ọba akwụkwọ Ọkammụta Kenneth Dike dị <mask>.") #= n'Awka
fill_mask("Nwaanyị na eri <mask> na akara.") #= ji
fill_mask("Gaanụ mee ndị <mask> niile ka ha bụrụ ndị na- eso ụzọ m .") #= mba
fill_mask("Jehova họpụtara Mozis ka ọ bụrụ onye ndú ụmụ <mask>.") #= Izrel
fill_mask("Ụmụakwụkwọ Chibok anọọla ụbọchị 2000 n’ aka <mask> Haram.") #= Boko
fill_mask("Nwunye Gọvanọ Ekiti steeti bụ Bisi Fayemi so na ndị na- akwado <mask> ọhụrụ a.") #= iwu
fill_mask(" <mask> sị ka ehiwe ụlọikpe pụrụiche maka mpụ.") #= Buhari
fill_mask("Ala <mask> ga- eweta ezi ọnọdụ nchekwa maka ndị chọrọ ịwebata ego n’ ọrụ ugbo.") #= Naịjirịa
fill_mask("Ọ bụ <mask>a ka a na- arịa .") #= mmadụ
from google.colab import drive
drive.mount('/content/gdrive')
#from google.colab import files
#files.download("/content/igbo_bert4.zip")
shutil.move('/content/igbo_bert4','/content/gdrive/MyDrive/igbo_bert')
# shutil.make_archive("/content/igbo_bert4", 'zip', "igbo_bert4")
# model_save_name = '/igbo_bert4.zip'
# path = F"/content/gdrive/My Drive/igbo_bert/{model_save_name}"
# torch.save(model.state_dict(), path)
shutil.copy('/content/gdrive/MyDrive/igbo_bert/igbo_bert4','/content/sample_data')
###Output
_____no_output_____ |
tutorials/text-as-data/exercises/2-nlp-exercises.ipynb | ###Markdown
DSFM text-as-data workshop

2. Basics of Natural Language Processing

Creator: [Data Science for Managers - EPFL Program](https://www.dsfm.ch)
Source: [https://github.com/dsfm-org/code-bank.git](https://github.com/dsfm-org/code-bank.git)
License: [MIT License](https://opensource.org/licenses/MIT). See open source [license](LICENSE) in the Code Bank repository.

Overview

When dealing with text data, we can look at it from different perspectives. We can, for instance, look at a single sentence to study it at the linguistic level. This includes, for instance, finding named entities (named entity recognition) or part-of-speech tags (verb, adverb, ...). Another approach is to consider the whole document as a single entity and look for similar documents, one simple solution being to count the words that documents share in common. This is equivalent to representing each document as a vector and computing distances between the vectors.

Part 1: NLP with spaCy

We already used spaCy ([spacy.io](https://spacy.io/)) in the previous notebook to tokenize the text and find the stopwords. spaCy's catchphrase is "Industrial strength NLP in Python". spaCy is known to be fast and simple to use.

spaCy can help answer questions such as:
- What is this text talking about?
- What do the words mean in this context?
- What companies and products are mentioned?

The main features of spaCy are:
- Tokenization
- Part-of-speech (POS) tagging: assigning word types, like verb or noun, to tokens.
- Dependency parsing: a tool to describe the relations between individual tokens (see next).
- Named entity recognition: finding entities such as person or firm names in a text.

Resources:
- [spaCy 101](https://spacy.io/usage/spacy-101), the official getting-started tutorial.

Q1: Load the `review_clean.csv` CSV file into a Pandas DataFrame `df` and display the first 5 reviews.
###Code
import numpy as np
# Fix random seed for reproducibility
np.random.seed(42)
import pandas as pd
df = # YOUR CODE HERE #
df.head(# YOUR CODE HERE #)
###Output
_____no_output_____
###Markdown
Q2: Store in a `first_review` variable the first review and display it on screen.
###Code
first_review = # YOUR CODE HERE #
first_review
###Output
_____no_output_____
###Markdown
Part-of-speech (POS) tagging is the process of assigning grammatical properties (noun, verb, adjective, adverb, etc.) to words. spaCy models use both the definition of a word and its context to determine the right tag.

Q3: Using spaCy, apply POS tagging to the first review and look at the results. What is the part-of-speech tag for the `-` token? What about `phone` and `Jana`?

> ☝️ At line 11 we overwrite the default printing function with the one from the [rich](https://github.com/willmcgugan/rich) library. This allows us to pretty-print the data.
###Code
import spacy
nlp = spacy.load("en_core_web_sm")
doc = nlp(# YOUR CODE HERE #)
pos = []
for token in doc:
pos.append(# YOUR CODE HERE #)
from rich import print
print(pos[-10:])
###Output
_____no_output_____
###Markdown
Q4: Using [spaCy visualizer](https://spacy.io/usage/visualizers), display the dependency parse tree of the first sentence of the first review.
###Code
first_sentence = # YOUR CODE HERE #
print(f"First sentence is: {first_sentence}")
doc = nlp(# YOUR CODE HERE #)
from spacy import displacy
displacy.render(# YOUR CODE HERE #, style="dep")
###Output
_____no_output_____
###Markdown
Q5: Using the spaCy recognizer, look at the named entities of the first and second reviews. Which information can you get it out of it? Can you use a similar function in your daily job?
###Code
doc = nlp(# YOUR CODE HERE #)
displacy.render(# YOUR CODE HERE #, style="ent")
second_review = # YOUR CODE HERE #
doc = nlp(# YOUR CODE HERE #)
displacy.render(# YOUR CODE HERE #, style="ent")
###Output
_____no_output_____
###Markdown
Part 2: Vector Space

As machines cannot understand human languages the way we do, we need to somehow transform text data into a numeric format. The idea is to _map_ every review to a numeric vector.

Q1 (**theory**): You just received 10 thousand new contracts. You need to categorize them into 10 different sub-categories in an efficient way. What do you do? You don't have access to any a priori information; your data have neither metadata nor labels. Describe in layman's terms how you would proceed.

**Answers**
1. Count the word occurrences in each document and create a document-term count matrix.
2. Apply a clustering algorithm such as k-means (with k = 10 in this case) and find the different clusters.

Q2: [Texthero](https://texthero.org/) is a simple toolkit to preprocess and analyze text-based datasets. Texthero is still in beta and therefore some parts might change in future releases.

With the aid of Texthero, represent each review by counting words. Select only the 500 most common words.

If you need help, you can have a look at the [getting-started](https://texthero.org/docs/getting-started) tutorial.
###Code
import texthero as hero
df['count'] = # YOUR CODE HERE #
df['count']
###Output
_____no_output_____
###Markdown
Q3: By applying principal component analysis, reduce the dimension of the vector space to two.
###Code
df['pca'] = # YOUR CODE HERE #
df['pca']
###Output
_____no_output_____
###Markdown
Q4: Visualize the obtained vector space. Can you identify any pattern?
###Code
from matplotlib.pyplot import rcParams
rcParams['figure.figsize'] = 10, 8
import seaborn as sns; sns.set()
import seaborn
seaborn.# YOUR CODE HERE #
###Output
_____no_output_____
###Markdown
Q5: Find the reviews most similar to the second review. To do this, you will need to compute the distance between reviews and pick the closest ones. You can use the [cosine_similarity](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.cosine_similarity.html) function from `scikit-learn`.
###Code
from sklearn.metrics.pairwise import cosine_similarity
first_review_vector = # YOUR CODE HERE #
first_review_vector
cosine_similarity(
np.asarray(list(df['count'])), np.array(# YOUR CODE HERE #).reshape(1, -1)
).reshape(1, -1)[0].argsort()[::-1]
df.iloc[# YOUR CODE HERE #]['text']
df.iloc[# YOUR CODE HERE #]['text']
df.iloc[# YOUR CODE HERE #]['text']
###Output
_____no_output_____
###Markdown
Part 3: Topic modelling

Topic modeling is an unsupervised learning method. The goal is to find groups of documents that share the same "topic". Topic models are useful for uncovering hidden structure in a collection of texts. There are two common algorithms: Latent Semantic Analysis (LSA) and Latent Dirichlet Allocation (LDA).

There are different Python libraries that can be used for topic modeling; Gensim and scikit-learn are very common. Gensim's documentation is not always crystal clear and it can be complex to use in some scenarios. For this part, we will use scikit-learn, in particular [CountVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) and [LatentDirichletAllocation](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.LatentDirichletAllocation.html).

Q1: Store all reviews in a variable `reviews` and compute the "review-term" matrix (`review_term_matrix`) using CountVectorizer. Then, display the shape of the obtained matrix. Does it look like what you expected?

> ☝️ For faster computation, you can limit the number of terms to 500 (`max_features=500`).

> ☝️ Make sure you use the "text_clean" column with stopwords removed (otherwise stopwords will pollute the topics)
###Code
from sklearn.feature_extraction.text import CountVectorizer
reviews = list(df['text_clean'])
vectorizer = # YOUR CODE HERE #
review_term_matrix = # YOUR CODE HERE #
review_term_matrix.shape
###Output
_____no_output_____
###Markdown
Q2: Apply the LDA algorithm to the obtained `review_term_matrix`. You will need to specify the number of topics you want to compute as well as the number of iterations for the LDA algorithm.
###Code
from sklearn.decomposition import LatentDirichletAllocation
lda = LatentDirichletAllocation(# YOUR CODE HERE #)
lda.fit(review_term_matrix);
###Output
_____no_output_____
###Markdown
Q3: The function `print_top_words` below displays the top words in each "cluster". Display the 15 most common words for each cluster. What do you notice?
###Code
def print_top_words(scikit_learn_model, feature_names, num_top_words):
for topic_num, topic in enumerate(scikit_learn_model.components_):
print(f"Topic #{topic_num}")
print(" ".join([feature_names[i]
for i in topic.argsort()[:-num_top_words - 1:-1]]))
print()
print_top_words(# YOUR CODE HERE #)
###Output
_____no_output_____
###Markdown
Q4: If you wish, you can play around with the obtained topic model by executing these lines of code:
###Code
import pyLDAvis
import pyLDAvis.sklearn
# topic_vis_data = pyLDAvis.sklearn.prepare(lda, review_term_matrix, vectorizer)
# pyLDAvis.display(topic_vis_data)
###Output
_____no_output_____ |
Spike Challenge.ipynb | ###Markdown
Spike Challenge October 2019 Ivan Jara Varela
###Code
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
%matplotlib inline
###Output
_____no_output_____
###Markdown
With the required libraries imported, we begin. Problem 1 Download the CSV file to create the dataframe.
###Code
#df = pd.read_gbq('SELECT * FROM public.caudal_extra_min',project_id='spikelab')
## For the database in BigQuery (I did not have access)
df = pd.read_csv('caudal_extra.csv')
###Output
_____no_output_____
###Markdown
Problem 2 There are several rows with missing data, because not all basins have stations measuring precipitation or temperature.
###Code
num_rows_nan_t = df['temp_max_promedio'].isna().sum()
num_rows_nan_p = df['precip_promedio'].isna().sum()
print('There are', num_rows_nan_t, 'rows with no temperature record')
print('There are', num_rows_nan_p, 'rows with no precipitation record')
###Output
_____no_output_____
###Markdown
We can look at a summary of the three main variables
###Code
print(df[['caudal','temp_max_promedio','precip_promedio']].describe())
###Output
_____no_output_____
###Markdown
We can also check whether the distributions of the variables are skewed
###Code
print(df[['caudal','temp_max_promedio','precip_promedio']].skew())
###Output
_____no_output_____
###Markdown
Flow and precipitation measurements are right-skewed (a long tail of values above the mean) and temperature is left-skewed (a tail below the mean), relative to a normal distribution. There are two identical columns, gauge_id and codigo_estacion, so one of them is dropped.
###Code
print(set(df['gauge_id'] == df['codigo_estacion']))
df.drop(columns='gauge_id',inplace=True)
###Output
_____no_output_____
###Markdown
There are useless columns, such as institucion and fuente, because they have the same value in every row (measurement)
###Code
print('There is', len(set(df['institucion'])), 'distinct value across all rows of the institucion column')
print('There is', len(set(df['fuente'])), 'distinct value across all rows of the fuente column')
df.drop(columns=['institucion','fuente'],inplace=True)
###Output
_____no_output_____
###Markdown
We can also find out how many basins and stations there are
###Code
num_cuencas = len(set(df['codigo_cuenca']))
num_estaciones = len(set(df['codigo_estacion']))
print('There are', num_cuencas, 'basins, monitored at', num_estaciones, 'stations')
###Output
_____no_output_____
###Markdown
Finally, we keep only the numeric variables, convert the fecha column to datetime format, and delete the original dataframe
###Code
dff = df[['codigo_estacion',
'codigo_cuenca',
'cantidad_observaciones',
'altura',
'latitud',
'longitud',
'fecha',
'caudal',
'precip_promedio',
'temp_max_promedio']]
dff['fecha'] = pd.to_datetime(dff['fecha'])
del df
###Output
_____no_output_____
###Markdown
Problem 3 a) While they exist, the stations have flow records for the whole period; for temperature and precipitation that is not the case.
###Code
def time_plot_una_estacion(codigo_estacion,columna,fecha_min,fecha_max):
df1 = dff[dff['codigo_estacion'] == codigo_estacion]
df1.set_index('fecha',inplace=True)
df1 = df1[[columna]]
df1 = df1[fecha_min:fecha_max]
return df1.plot(title = 'Station code: ' + str(codigo_estacion))
###Output
_____no_output_____
###Markdown
Below is an example of the function in use.
###Code
time_plot_una_estacion(4540001,'caudal','1965-01-06','1976-02-13')
###Output
_____no_output_____
###Markdown
b)
###Code
def time_plot_estaciones_varias_columnas(codigo_estacion,columnas,fecha_min,fecha_max):
df1 = dff[dff['codigo_estacion'] == codigo_estacion]
df1.set_index('fecha',inplace=True)
df1 = df1[columnas]
df1 = 100*(df1 - df1.min())/(df1.max()- df1.min())
df1 = df1[fecha_min:fecha_max]
return df1.plot(title= 'station: ' + str(codigo_estacion) + ', min/max normalized')
###Output
_____no_output_____
###Markdown
An example of its use:
###Code
time_plot_estaciones_varias_columnas(4540001,['caudal','precip_promedio','temp_max_promedio'],'1980-01-06','1985-02-13')
###Output
_____no_output_____
###Markdown
As a sanity check, we can see how temperature varies over the year, which suggests the data looks right. Problem 4 Using a very nice way of converting a datetime into a meteorological season (summer, winter, spring, and fall), adapted from [here](https://stackoverflow.com/questions/44124436/python-datetime-to-season)
###Code
dff['season'] = [(dt.month%12 + 3)//3 for dt in dff['fecha']]
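# (month % 12 + 3) // 3 maps Dec-Feb -> 1, Mar-May -> 2, Jun-Aug -> 3 and
# Sep-Nov -> 4, i.e. Southern-hemisphere summer, fall, winter, spring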
###Output
_____no_output_____
###Markdown
Create a function to compute the 95th percentile
###Code
def per95(x):
return np.percentile(x,95)
###Output
_____no_output_____
###Markdown
Create a new dataframe with pivot_table, with codigo_estacion and season as the indices and the 95th percentile of the 3 variables under study as the values
###Code
df_q = pd.pivot_table(dff,index=['codigo_estacion','season'],values=['caudal','precip_promedio','temp_max_promedio'],\
aggfunc=per95)
###Output
_____no_output_____
###Markdown
Create a function that assigns 1 if a number is greater than or equal to another, and 0 otherwise
###Code
def mm(x,y):
if (x >= y):
return 1
else:
return 0
###Output
_____no_output_____
###Markdown
Now, apply the previous function, comparing the df_q and dff dataframes and creating the required columns; then delete df_q and the temporary arrays. This takes about 5 minutes on my computer
###Code
c_xtrem = []
p_xtrem = []
t_xtrem = []
for dc,dp,dt,de,ds in zip(dff['caudal'],dff['precip_promedio'],\
dff['temp_max_promedio'],\
dff['codigo_estacion'],dff['season']):
xtrem = df_q.loc[(de,ds)]
c_xtrem.append(mm(dc,xtrem[0]))
p_xtrem.append(mm(dp,xtrem[1]))
t_xtrem.append(mm(dt,xtrem[2]))
dff['caudal_extremo'] = c_xtrem
dff['precip_extremo'] = p_xtrem
dff['temp_extremo'] = t_xtrem
del df_q, c_xtrem, p_xtrem, t_xtrem
dff[['codigo_estacion','season','caudal_extremo','precip_extremo','temp_extremo']].head()
###Output
_____no_output_____
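###Markdown
A hedged sketch of a vectorized alternative to the loop above: join the per-(station, season) 95th-percentile thresholds back onto `dff` and compare whole columns at once. This would replace the loop (it needs `df_q` before it is deleted), and the names `thresholds`, `merged` and the `*_95` columns are introduced here for illustration.
###Code
# Flatten the (codigo_estacion, season) index so we can merge on columns.
thresholds = df_q.rename(columns={'caudal': 'caudal_95',
                                  'precip_promedio': 'precip_95',
                                  'temp_max_promedio': 'temp_95'}).reset_index()
merged = dff.merge(thresholds, on=['codigo_estacion', 'season'], how='left')
# Same >= rule as mm(); .to_numpy() avoids index-alignment surprises.
dff['caudal_extremo'] = (merged['caudal'].to_numpy() >= merged['caudal_95'].to_numpy()).astype(int)
dff['precip_extremo'] = (merged['precip_promedio'].to_numpy() >= merged['precip_95'].to_numpy()).astype(int)
dff['temp_extremo'] = (merged['temp_max_promedio'].to_numpy() >= merged['temp_95'].to_numpy()).astype(int)
###Output
_____no_output_____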
###Markdown
where 1: summer, 2: autumn, 3: winter and 4: spring. Filtering by the 95th percentile is a good way to capture extreme events. Another approach would be to exploit the randomness of the data; my intuition is that the distribution of extreme events over time carries more entropy, but for now I don't know how to compute something like that. Number 5 First, we create a new dataframe, again using pivot_table to aggregate extreme streamflow events per basin
###Code
df_caudal = dff.pivot_table(index='codigo_cuenca',values='caudal_extremo',aggfunc=np.sum)
###Output
_____no_output_____
###Markdown
Then, we plot the number of occurrences of extreme streamflow events per basin
###Code
df_caudal.plot.bar(title='Number of occurrences per basin')
###Output
_____no_output_____
###Markdown
We can see that the number of extreme streamflow events differs between some basins by an order of magnitude (10 times larger) in some cases. Number 6 We create a new column, 'year', and a new dataframe, df6
###Code
years = [dt.year for dt in dff['fecha']]
dff['year'] = years
df6 = dff.drop(columns='fecha')
###Output
_____no_output_____
###Markdown
For the last time, pivot_table is used to aggregate the variables, this time with the following function, which returns the percentage of extreme events of each variable in each year
###Code
def perc100(x):
return 100*np.mean(x)
df6 = pd.pivot_table(dff,index='year',values=['caudal_extremo','precip_extremo','temp_extremo'],aggfunc=perc100)
###Output
_____no_output_____
###Markdown
Finally, we plot and delete the temporary variables
###Code
df6.plot(title='Extreme events per year')
del years, df6
###Output
_____no_output_____
###Markdown
Just from the chart above, extreme precipitation events have decreased, streamflow events show no clear trend, and temperature events appear to rise. Considering basins, geographic zones, or other aggregation functions separately might reveal a trend more clearly. Number 7 We create the 'month' and 'day' variables and drop the rows containing missing values
###Code
dff['month'] = [dt.month for dt in dff['fecha']]
dff['day'] = [dt.day for dt in dff['fecha']]
dff.dropna(inplace=True)
###Output
_____no_output_____
###Markdown
The test set size is fixed at 25%
###Code
test_s = 0.25
###Output
_____no_output_____
###Markdown
We split the dataset into features (datax) and target (datay)
###Code
datax = dff[['codigo_estacion',
'codigo_cuenca',
'cantidad_observaciones',
'altura',
'latitud',
'longitud',
'year',
'month',
'day',
'caudal',
'precip_promedio',
'temp_max_promedio']]
datay = dff[['caudal_extremo']]
###Output
_____no_output_____
###Markdown
We split into the train and test datasets
###Code
x_train,x_test,y_train,y_test = train_test_split(datax,datay,test_size=test_s)
###Output
_____no_output_____
###Markdown
We will train a K-Nearest Neighbors (KNN) model with k=15 neighbors, weighted by distance (a scaled variant is sketched after this cell). Training took me around 2 minutes
###Code
knn = KNeighborsClassifier(n_neighbors=15,weights='distance')
knn.fit(x_train,y_train.values.ravel())
y_pred = knn.predict(x_test)
###Output
_____no_output_____
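###Markdown
A minimal sketch, assuming the standard scikit-learn API: KNN is distance-based, so features on very different scales (e.g. codigo_estacion vs. latitud) dominate the Euclidean distance, and a scaling step is usually worth trying. The names `scaled_knn` and `y_pred_scaled` are introduced here for illustration.
###Code
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# Standardize every feature before the distance computation.
scaled_knn = make_pipeline(StandardScaler(),
                           KNeighborsClassifier(n_neighbors=15, weights='distance'))
scaled_knn.fit(x_train, y_train.values.ravel())
y_pred_scaled = scaled_knn.predict(x_test)
###Output
_____no_output_____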
###Markdown
I tried to use all of the original available data. I propose using the 12 feature variables to predict extreme streamflow in the future, i.e., simply entering the 12 values required for the day to be forecast. As I understand it, rather than predicting future conditions, the model learns the cyclic patterns and the rising or falling trend of the extreme-streamflow variable. KNN works on Euclidean distances, so in principle any numeric feature can be used, although features on very different scales will dominate the distance (see the scaling sketch above). It should work well with reasonable inputs within conventional ranges (do not use 60°C for temperature, for example). Number 8 a) Using the simplest metric, a comparison between the predictions and the test dataset held out in the previous step, we get:
###Code
print('The model reaches an accuracy of', round(metrics.accuracy_score(y_test, y_pred)*100, 4), '%')
###Output
_____no_output_____
###Markdown
For KNN I don't know a direct way to do sensitivity analysis, but some variables may be carrying the same information (redundant), such as the basin, the station, and the coordinates. One option is to "switch off" one feature at a time and retrain the model to see how the performance metric changes (a permutation-based sketch is given after the next cell). Despite this, training does not take long and seems to give good results. It is a good model to start with. b) I don't fully understand the question; in the models I know (and this one in particular) I believe you cannot choose exactly which rows will be used to train the model. If the question is about using 70% of the dataset for training, then
###Code
test_s = 0.3
x_train,x_test,y_train,y_test = train_test_split(datax,datay,test_size=test_s)
knn1 = KNeighborsClassifier(n_neighbors=15,weights='distance')
knn1.fit(x_train,y_train.values.ravel())
y_pred = knn1.predict(x_test)
print('The model reaches an accuracy of', round(metrics.accuracy_score(y_test, y_pred)*100, 4), '%')
###Output
_____no_output_____
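###Markdown
A hedged sketch of the "switch one feature off" idea from the discussion above, using permutation importance: shuffle one column at a time in the test set and measure the accuracy drop. It assumes scikit-learn >= 0.22 (for `sklearn.inspection.permutation_importance`) and reuses `knn1`, `x_test`, `y_test` from the cell above; with KNN this can be slow, so subsampling `x_test` first is an option.
###Code
from sklearn.inspection import permutation_importance
result = permutation_importance(knn1, x_test, y_test.values.ravel(),
                                n_repeats=3, scoring='accuracy')
# Larger drop = the model leans more heavily on that feature.
for name, drop in sorted(zip(x_test.columns, result.importances_mean),
                         key=lambda t: -t[1]):
    print(f'{name}: {drop:.4f}')
###Output
_____no_output_____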
###Markdown
Prediction of extreme streamflows in Chile Loading and analysis of the data Analyze the caudal_extra.csv dataset. What can you say about the data, distributions, missing values, or anything else? Is there anything that catches your attention? Why are there so many missing values? Think about the way the dataset was built, described further above. (Deliverable: text/images)
###Code
#Packages used in this analysis
library(dplyr)
library(ggplot2)
library(reshape2)
library(lubridate)
library(randomForest)
library(pROC)
#Load our data, in this case a csv file
cuencas <- read.csv("caudal_extra.csv")
#The str command shows the structure of our data set
str(cuencas)
summary(cuencas)
cuencas %>%
select (c(13, 16, 17))%>%
na.omit()%>%
melt() %>%
ggplot(aes(x = value)) +
geom_density()+
facet_wrap( ~ variable, scale = "free", nrow = 1)
###Output
No id variables; using all as measure variables
###Markdown
The distributions of streamflow and average precipitation are skewed to the left, while temperature seems to follow a bimodal distribution. Plots of precipitation, temperature and streamflow a. Write a function that takes a station as input and plots the data for one column. It should have these arguments: def time_plot_una_estacion(codigo_estacion, columna, fecha_min, fecha_max):
###Code
time_plot_una_estacion <- function(codigo_estacion, columna, fecha_min, fecha_max){
  x <- codigo_estacion #rename the input as x to avoid confusion with the column of the same name
  cuencas %>%
    mutate(fecha = as.Date(cuencas$fecha, format = "%Y-%m-%d")) %>% #convert the fecha column to Date type
    filter(.$fecha >= fecha_min & .$fecha <= fecha_max) %>% #filter by the dates passed to the function
    filter(codigo_estacion == x) %>% #filter by the station code passed to the function
    na.omit() %>% #drop NAs
    ggplot(aes(x = .$fecha, y = .[,columna])) + #plot the column passed to the function
    geom_line(col="#00cc99", size = 0.6) + #set line width and color
    theme(axis.ticks.x = element_blank(),
          panel.background = element_blank(),
          panel.border = element_rect(fill = NA, colour = "grey"))+ #set the panel border color
    scale_x_date(date_labels = "%Y-%m") + #format the x axis as year-month dates
    xlab("Date") + ylab(columna)
}
#Example
time_plot_una_estacion("11335002", "temp_max_promedio", "2017-01-01", "2017-12-01")
###Output
_____no_output_____
###Markdown
b. Now write a function that plots several columns, so that streamflow, precipitation and temperature can be visualized at the same time. Since the series are on different scales, we suggest normalizing them before plotting (for example, dividing by the first observation of each series) def time_plot_estaciones_varias_columnas(codigo_estacion, columnas, fecha_min, fecha_max):
###Code
time_plot_varias_columnas <- function(codigo_estacion, columnas, fecha_min, fecha_max){
  x <- codigo_estacion #rename the input as x to avoid confusion with the column of the same name
  funcion <- cuencas[c("codigo_estacion", columnas, "fecha")] #select the columns of interest
  funcion <- funcion %>%
    mutate(fecha = as.Date(.$fecha, format = "%Y-%m-%d")) %>% #convert the fecha column to Date type
    filter(.$fecha >= fecha_min & .$fecha <= fecha_max) %>% #filter by the dates passed to the function
    arrange(.$fecha) %>% #sort by date
    filter(codigo_estacion == x) #filter by the station code passed to the function
  for(i in columnas){ #loop over however many columns the caller passes
    varname <- i #use i as the variable name
    funcion <- funcion %>%
      mutate(!!varname := funcion[, i]*100/funcion[1, i]) #normalize column i to 100 by dividing by its first value
  }
  funcion %>%
    melt(id= c("codigo_estacion", "fecha")) %>% #melt the chosen columns to long format, keeping codigo_estacion and fecha fixed
    ggplot(aes(x = .$fecha, y = value, col = variable)) +
    geom_line()+
    theme(axis.ticks.x = element_blank(),
          panel.background = element_blank(),
          panel.border = element_rect(fill = NA, colour = "grey"), #set the panel border color
          legend.title = element_blank())+ #remove the legend title
    scale_x_date(date_labels = "%Y-%m") + #format the x axis as year-month dates
    xlab("Date") + ylab("")
} #Limitation: fails if the first value of a column is zero
#Example
time_plot_varias_columnas("11335002", c("caudal", "precip_promedio", "temp_max_promedio"), "2017-01-01", "2017-12-01")
###Output
_____no_output_____
###Markdown
Extreme variables Create three variables named: ● caudal_extremo ● temp_extremo ● precip_extremo These variables take the value 1 on a given day if the streamflow/temperature/precipitation (as appropriate) observed that day is extreme. This means it is higher than "expected". To capture this idea, the streamflow value, for example, takes the value 1 if it is above the 95th percentile of that streamflow for that season of the year (Summer, Spring, Autumn, Winter). It takes the value 0 when it is below that percentile. In other words, for each measuring station and for each season of the year, you must consider the historical distribution of streamflow/temperature/precipitation to pick that 95th percentile. This measure accounts for seasonality, since, for example, a temperature of 25 degrees in winter may be extreme, while in summer it is normal. It also accounts for each basin (or station) being different. What is extreme for one basin is not for another.
###Code
#We will use these dates as the seasons
#Summer: from December 21 (solstice) to March 20 (equinox).
#Autumn: from March 20 (equinox) to June 21 (solstice).
#Winter: from June 21 (solstice) to September 21 (equinox).
#Spring: from September 21 (equinox) to December 21 (solstice).
#Create a column with the seasons
estaciones <- cuencas %>%
  mutate(fecha = as.Date(cuencas$fecha, format = "%Y-%m-%d")) %>% #convert the fecha column to Date type
  mutate(estaciones = ifelse(as.Date(format(.$fecha, format = "%m-%d"), format = "%m-%d")
                             %in% c(as.Date("03-01", format = "%m-%d"):
                                    as.Date("06-21", format = "%m-%d")), "Otoño",
                      ifelse(as.Date(format(.$fecha, format = "%m-%d"), format = "%m-%d")
                             %in% c(as.Date("06-22", format = "%m-%d"):
                                    as.Date("09-21", format = "%m-%d")), "Invierno",
                      ifelse(as.Date(format(.$fecha, format = "%m-%d"), format = "%m-%d")
                             %in% c(as.Date("09-22", format = "%m-%d"):
                                    as.Date("12-21", format = "%m-%d")), "Primavera",
                      "Verano")))) #use mutate to create the estaciones column; nested ifelse conditions classify each season
#Create the variables: caudal_extremo, temp_extremo and precip_extremo
percentil95 <- estaciones %>%
  mutate(codigo_cuenca = as.factor(as.numeric(codigo_cuenca))) %>% #convert codigo_cuenca to factor
  group_by(codigo_estacion, estaciones) %>% #group by station and season
  na.omit() %>% #drop NAs
  mutate(caudal_95 = quantile(caudal, .95), #columns holding the 95th percentile of the 3 variables
         temp_95 = quantile(temp_max_promedio, .95),
         precip_95 = quantile(precip_promedio, .95)) %>%
  ungroup() %>% #ungroup
  mutate(caudal_extremo = ifelse(caudal > caudal_95, 1, 0), #create the new variables with ifelse, flagging values above the 95th percentile
         temp_extremo = ifelse(temp_max_promedio > temp_95, 1, 0),
         precip_extremo = ifelse(precip_promedio > precip_95, 1, 0))
###Output
_____no_output_____
###Markdown
Does this measure seem reasonable to you for capturing something "extreme"? Would you use another one? Which one? (Just describe it, don't code it! We will use Spike's definition for this challenge.) I would also use the extreme values that fall below the 5th percentile. Extreme streamflow Analysis of the caudal_extremo variable across the different basins
###Code
cuencas %>%
  summarise(n_distinct(codigo_cuenca)) #count how many distinct basins we have
percentil95 %>%
  mutate(caudal_extremo = as.factor(caudal_extremo)) %>% #convert caudal_extremo to factor (a typo here previously created a misspelled column)
  group_by(codigo_cuenca, estaciones, caudal_extremo) %>% #group by the variables of interest
  summarise(Avcaudal=mean(caudal))%>% #compute the mean streamflow
  ggplot(aes(x = codigo_cuenca, y = Avcaudal, fill =factor(caudal_extremo)))+
  geom_bar(stat = "identity", position = "dodge")+ #dodge separates the bars for caudal_extremo equal to 1 and 0
  theme(axis.text.x = element_text(angle = 90))+ #rotate the x-axis text to vertical
  facet_wrap(~estaciones, scales = "fixed", nrow = 2) #facet by season, with fixed axis scales, in two rows
###Output
_____no_output_____
###Markdown
Analyze the caudal_extremo variable. Is the behavior very different across basins? Yes, the basins are quite different, some with relatively little streamflow compared to others. The chart also shows that a single basin can exhibit extreme-streamflow behavior even within the same season of the year. In some cases the mean streamflow is more than double for the same basin and season when comparing caudal_extremo equal to 1 versus 0. Percentage of extreme events over time Plot the percentage of extreme events over time (caudal_extremo, temp_extremo, precip_extremo).
###Code
percentil95 %>%
  mutate(year = lubridate::year(fecha),
         month = lubridate::month(fecha),
         day = lubridate::day(fecha)) %>%
  group_by(year, estaciones) %>% #group by our variables of interest
  summarise(percentage_caudal =(sum(caudal_extremo)/n())*100, #convert the variables of interest to percentages
            percentage_temp =(sum(temp_extremo)/n())*100,
            percentage_precip =(sum(precip_extremo)/n())*100) %>%
  melt(id= c("year", "estaciones")) %>% #melt to long format, keeping year and season fixed
  ggplot(aes(x = year, y = value, col = variable))+
  geom_line()+
  theme(axis.ticks.x = element_blank(),
        panel.background = element_blank(),
        panel.border = element_rect(fill = NA, colour = "grey"), #set the panel border color
        legend.title = element_blank())+ #remove the legend title
  facet_wrap(~estaciones, scales = "fixed", nrow = 2)+
  ylab("Percentage")
###Output
_____no_output_____
###Markdown
Have they become more or less common? It is hard to extract a clear pattern, but temperature in particular has shown an increase in recent years, in autumn. It can also be seen that for spring and winter the minimum percentage of extreme events has been rising for all 3 variables. Prediction of extreme streamflow Train one or several models (using the algorithm(s) you prefer) to estimate the probability of an extreme streamflow (the binary variable caudal_extremo). Feel free to generate additional variables and/or complement with external variables.
###Code
#First, clean the dataframe
percentil95_clean <- percentil95 %>%
  select(-c(3:5, 9:11, 14, 15, 19:21)) %>% #drop the columns we are not interested in
  mutate(year = lubridate::year(fecha), #split the date into year, month and day columns
         mes = lubridate::month(fecha),
         dia = lubridate::day(fecha)) %>%
  mutate(estaciones = as.factor(.$estaciones), #convert the columns of interest to factors
         caudal_extremo = as.factor(.$caudal_extremo),
         temp_extremo = as.factor(.$temp_extremo),
         precip_extremo = as.factor(.$precip_extremo)) %>%
  na.omit()
str(percentil95_clean) #check that everything looks right
set.seed(158) #set a seed so the results can be reproduced
#Split the data into training and test sets
train_subset <- percentil95_clean %>%
  sample_frac(.70) #here we split the data into 70% training and 30% test
test_subset <- anti_join(percentil95_clean, train_subset, by = 'X') #anti_join picks the rows that are not in the training set, using column X (a unique id per row) as reference
#Of the 16 variables we drop X, codigo_estacion, precip_extremo and temp_extremo
train <- train_subset %>%
  select(-c(1:3, 12:13))
test <- test_subset %>%
  select(-c(1:3, 12:13))
str(train)
#Compute the correlation between our variables to check for redundant ones
correlation <- train %>%
  mutate(fecha = as.numeric(fecha), #convert these columns to numeric, since cor() only accepts numeric input
         caudal_extremo = as.numeric(caudal_extremo)) %>%
  select(-7) %>% #and drop the estaciones variable from the matrix
  cor()
round(correlation, 3)
###Output
_____no_output_____
###Markdown
Since fecha and year are redundant, we will drop the year variable. Latitude and longitude are also redundant, so we will drop the latitud variable
###Code
train_data <- train %>%
  select(-c(1, 9))%>% #drop the redundant variables
  do(sample_n(., 200000)) #subsample the subset to fewer rows, given my laptop's capacity
test_data <- test %>%
  select(-c(1, 9))%>%
  do(sample_n(., 85714)) #also subsampled, preserving the 70/30 proportions.
###Output
_____no_output_____
###Markdown
I chose the random forest algorithm because it is one of the most accurate, can handle large amounts of data, both categorical and numeric, and does not require prior transformation of the data.
###Code
#You need enough free RAM to run this model
rf_caudal_extremo <- randomForest(
  caudal_extremo ~ .,
  data=train_data
)
rf_caudal_extremo
varImpPlot(rf_caudal_extremo) #inspect the most important variables
#Check how well the model predicts the test data
pred_caudal_extremo <- predict(rf_caudal_extremo, newdata=test_data[-7])
table(test_data$caudal_extremo, pred_caudal_extremo) #confusion matrix of our predictions
prop.table(table(test_data$caudal_extremo, pred_caudal_extremo), margin = 1) #per-class accuracy, including the caudal_extremo == 1 cases
###Output
_____no_output_____
###Markdown
Since the caudal_extremo variable has little representation of the extreme values, I created a dataset in which 19% of the rows have caudal_extremo equal to 1, so that the model is better trained to handle these cases.
###Code
f.sample <- function(a, percent) a[sample(nrow(a), nrow(a)*percent, replace = TRUE),]
train_balanced <- train %>%
  select(-c(1, 9)) #drop the redundant variables
train_ex <- f.sample(train_balanced[train_balanced$caudal_extremo==1,], 0.9) #sample 90% of the rows with caudal_extremo = 1
train_no_ex <- f.sample(train_balanced[train_balanced$caudal_extremo==0,], 0.2) #sample 20% of the rows with caudal_extremo = 0
train_rf_balanced <- rbind(train_ex,train_no_ex) #training data with 19% extreme-streamflow rows
test_balanced <- test %>%
  select(-c(1, 9)) #drop the redundant variables
test_ex <- f.sample(test_balanced[test_balanced$caudal_extremo==1,], 0.9) #sample 90% of the rows with caudal_extremo = 1
test_no_ex <- f.sample(test_balanced[test_balanced$caudal_extremo==0,], 0.2) #sample 20% of the rows with caudal_extremo = 0
test_rf_balanced <- rbind(test_ex,test_no_ex) #test data with 19% extreme-streamflow rows
rf_balanced <- randomForest(
  caudal_extremo ~ .,
  data=train_rf_balanced
)
rf_balanced
varImpPlot(rf_balanced) #inspect the most important variables
#Check how well the model predicts the balanced test data
pred_balanced_data <- predict(rf_balanced, newdata=test_rf_balanced[-7])
table(test_rf_balanced$caudal_extremo, pred_balanced_data) #confusion matrix of our predictions
prop.table(table(test_rf_balanced$caudal_extremo, pred_balanced_data), margin = 1) #per-class accuracy, including the caudal_extremo == 1 cases
#Check how well the model predicts the original test data.
pred_test_data <- predict(rf_balanced, newdata=test_data[-7])
table(test_data$caudal_extremo, pred_test_data) #confusion matrix of our predictions
prop.table(table(test_data$caudal_extremo, pred_test_data), margin = 1) #per-class accuracy, including the caudal_extremo == 1 cases
auc(as.numeric(test_data$caudal_extremo), as.numeric(pred_test_data)) #measure the model's overall AUC on our predictions
###Output
_____no_output_____
###Markdown
Exercise 02 The goal of this exercise is to evaluate the impact of using an arbitrary integer encoding for categorical variables along with a linear classification model such as Logistic Regression. To do so, let's try to use `OrdinalEncoder` to preprocess the categorical variables. This preprocessor is assembled in a pipeline with `LogisticRegression`. The performance of the pipeline can be evaluated as usual by cross-validation and then compared to the score obtained when using `OneHotEncoding` or to some other baseline score. Because `OrdinalEncoder` can raise errors if it sees an unknown category at prediction time, we need to pre-compute the list of all possible categories ahead of time:
```python
categories = [data[column].unique() for column in data[categorical_columns]]
OrdinalEncoder(categories=categories)
```
###Code
import pandas as pd
df = pd.read_csv(
"https://www.openml.org/data/get_csv/1595261/adult-census.csv")
# Or use the local copy:
# df = pd.read_csv('../datasets/adult-census.csv')
target_name = "class"
target = df[target_name].to_numpy()
data = df.drop(columns=[target_name, "fnlwgt"])
categorical_columns = [
c for c in data.columns if data[c].dtype.kind not in ["i", "f"]]
data_categorical = data[categorical_columns]
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
# TODO: write me!
###Output
_____no_output_____
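###Markdown
A minimal sketch of one possible solution, assuming the imports from the cell above; `categories`, `model` and `scores` are names introduced here for illustration, and the exact cross-validated score will depend on the scikit-learn version:
###Code
# Pre-compute the categories so OrdinalEncoder never sees an unknown one.
categories = [data[column].unique() for column in data[categorical_columns]]
model = make_pipeline(
    OrdinalEncoder(categories=categories),
    LogisticRegression(max_iter=1000))
scores = cross_val_score(model, data_categorical, target)
print(f"Accuracy: {scores.mean():.3f} +/- {scores.std():.3f}")
###Output
_____no_output_____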
###Markdown
Effective movie recommendation system (also solves the cold-start problem)
###Code
%load_ext lab_black
import turicreate
import pandas as pd
import numpy as np
df = pd.read_csv("df.csv")
# remove unnamed column
del df["Unnamed: 0"]
df.head(5)
# Recommendation with Collaborative Filtering (memory-based algorithm, it recommends items based on previous ratings)
# User-User collaborative filtering (users having higher correlation will tend to be similar.)
# Item-Item collaborative filtering (item/movies that are similar to each other are recommended)
df_sf = turicreate.SFrame(df)
# User-User: recommend top 5 movies based on the most popular choices (all the users receive the same recommendations)
# Training the model
popularity_model = turicreate.popularity_recommender.create(
df_sf, user_id="Cust_Id", item_id="Movie_Id", target="Rating"
)
# Making recommendations (example) - print top 5 recommendations for the first 3 users
popularity_recomm = popularity_model.recommend(users=list(df_sf["Cust_Id"][0:3]), k=5)
popularity_recomm.print_rows(num_rows=15)
# Item-Item - recommend movies based on past personal preferences (different users will have a different set of recommendations - personalized recommendations)
# Training the model
item_sim_model = turicreate.item_similarity_recommender.create(
df_sf,
user_id="Cust_Id",
item_id="Movie_Id",
target="Rating",
similarity_type="cosine",
)
# Making recommendations (example) - print top 5 recommendations for the first 3 users
item_sim_recomm = item_sim_model.recommend(users=list(df_sf["Cust_Id"][0:3]), k=5)
item_sim_recomm.print_rows(num_rows=15)
# user Cold Start - Making recommendations for a new user
# (it is not possible to provide personalized recommendations for a new user)
# If the model has never seen the user,
# then it defaults to recommending popular items
if sum(df_sf["Cust_Id"] == 12) == 0:
print("The user 12 is new")
popularity_model.recommend(users=[12], k=5)
# item Cold Start - Making recommendations for a new item
# If the model has never seen the item,
# then it defaults to score = 0 (which is the minimum)
if sum(df_sf["Movie_Id"] == 12) == 0:
print("The item 12 is new")
item_sim_model.recommend(users=list(df_sf["Cust_Id"][0:3]), items=[12])
###Output
The item 12 is new
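###Markdown
To compare the two recommenders quantitatively, one option is to hold out some ratings per user and compare RMSE on the held-out set. A minimal sketch, assuming turicreate's `random_split_by_user` and `evaluate_rmse` utilities behave as documented for the installed version; `train_sf`, `valid_sf`, `pop` and `sim` are names introduced here for illustration:
###Code
# Hold out ratings for a sample of users, retrain both models on the rest,
# and compare RMSE on the held-out ratings (lower is better).
train_sf, valid_sf = turicreate.recommender.util.random_split_by_user(
    df_sf, user_id="Cust_Id", item_id="Movie_Id")
pop = turicreate.popularity_recommender.create(
    train_sf, user_id="Cust_Id", item_id="Movie_Id", target="Rating")
sim = turicreate.item_similarity_recommender.create(
    train_sf, user_id="Cust_Id", item_id="Movie_Id", target="Rating",
    similarity_type="cosine")
print("popularity:", pop.evaluate_rmse(valid_sf, target="Rating")["rmse_overall"])
print("item similarity:", sim.evaluate_rmse(valid_sf, target="Rating")["rmse_overall"])
###Output
_____no_output_____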
###Markdown
Copyright 2018 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Recommendation Systems with TensorFlowThis Colab notebook goes into more detail about Recommendation Systems. Specifically, we'll be using matrix factorization to learn user and movie embeddings.Please **make a copy** of this Colab notebook before starting this lab. To do so, choose **File**->**Save a copy in Drive**.If possible, use a runtime with a GPU. This can be done using **Runtime**->**Change runtime type** and choosing Python 2, GPU. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movies ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages.
###Code
# @title Imports (run this cell)
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
# Install Altair and activate its colab renderer.
print("Installing Altair...")
!pip install git+git://github.com/altair-viz/altair.git
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
alt.renderers.enable('colab')
print("Done installing Altair.")
# Install spreadsheets and import authentication module.
USER_RATINGS = False
!pip install --upgrade -q gspread
from google.colab import auth
import gspread
from oauth2client.client import GoogleCredentials
###Output
_____no_output_____
###Markdown
We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings.
###Code
# @title Load the MovieLens data (run this cell).
# Download MovieLens data.
print("Downloading movielens data...")
import urllib
import zipfile
urllib.urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip")
zip_ref = zipfile.ZipFile('movielens.zip', "r")
zip_ref.extractall()
print("Done. Dataset contains:")
print(zip_ref.read('ml-100k/u.info'))
# Load each data set (users, movies, and ratings).
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0.
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
###Output
_____no_output_____
###Markdown
I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features.
###Code
users.describe()
###Output
_____no_output_____
###Markdown
We can also print some basic statistics describing the categorical user features
###Code
users.describe(include=[np.object])
###Output
_____no_output_____
###Markdown
We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.
###Code
# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
###Output
_____no_output_____
###Markdown
Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations?
###Code
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
###Output
_____no_output_____
###Markdown
MoviesIt is also useful to look at information about the movies and their ratings.
###Code
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
###Output
_____no_output_____
###Markdown
Finally, the last chart shows the distribution of the number of ratings and average rating.
###Code
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
###Output
_____no_output_____
###Markdown
II. Preliminaries
Our goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with
$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and
$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.
Here
- $N$ is the number of users,
- $M$ is the number of movies,
- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,
- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,
- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,
- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$.
Sparse Representation of the Rating Matrix
The rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix.
Toy example
Assume we have $2$ users and $4$ movies. Our toy ratings dataframe has three ratings,

user\_id | movie\_id | rating
--:|--:|--:
0 | 0 | 5.0
0 | 1 | 3.0
1 | 3 | 1.0

The corresponding rating matrix is
$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$
And the SparseTensor representation is,
```python
SparseTensor(
    indices=[[0, 0], [0, 1], [1, 3]],
    values=[5.0, 3.0, 1.0],
    dense_shape=[2, 4])
```
Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.
In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.
Hint: you can select the values of a given column of a Dataframe `df` using `df['column_name'].values`.
###Code
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
A tf.SparseTensor representing the ratings matrix.
"""
# ========================= Complete this section ============================
# indices =
# values =
# ============================================================================
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
#@title Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
###Output
_____no_output_____
###Markdown
Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful.
###Code
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
# ========================= Complete this section ============================
# loss =
# ============================================================================
return loss
#@title Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible.
###Code
#@title Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your google Drive account, and create a spreadsheet, that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'.
###Code
USER_RATINGS = True #@param {type:"boolean"}
# @title Run to create a spreadsheet, then use it to enter your ratings.
# Authenticate user.
if USER_RATINGS:
auth.authenticate_user()
gc = gspread.authorize(GoogleCredentials.get_application_default())
# Create the spreadsheet and print a link to it.
try:
sh = gc.open('MovieLens-test')
except(gspread.SpreadsheetNotFound):
sh = gc.create('MovieLens-test')
worksheet = sh.sheet1
titles = movies['title'].values
cell_list = worksheet.range(1, 1, len(titles), 1)
for cell, title in zip(cell_list, titles):
cell.value = title
worksheet.update_cells(cell_list)
print("Link to the spreadsheet: "
"https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id))
###Output
_____no_output_____
###Markdown
Run the next cell to load your ratings and add them to the main `ratings` DataFrame.
###Code
# @title Run to load your ratings.
# Load the ratings from the spreadsheet and create a DataFrame.
if USER_RATINGS:
my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
my_ratings = my_ratings[my_ratings[1] != '']
my_ratings = pd.DataFrame({
'user_id': "943",
'movie_id': map(str, my_ratings['index']),
'rating': map(float, my_ratings[1]),
})
# Remove previous ratings.
ratings = ratings[ratings.user_id != "943"]
# Add new ratings.
ratings = ratings.append(my_ratings, ignore_index=True)
# Add new user to the users DataFrame.
if users.shape[0] == 943:
users = users.append(users.iloc[942], ignore_index=True)
users["user_id"][943] = "943"
print("Added your %d ratings; you have great taste!" % len(my_ratings))
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])
###Output
_____no_output_____
###Markdown
III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- the user embeddings U (a `tf.Variable`).- the movie embeddings V, (a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:```U_var = ...V_var = ...loss = ...model = CFModel(U_var, V_var, loss)model.train(iterations=100, learning_rate=1.0)user_embeddings = model.embeddings['user_id']movie_embeddings = model.embeddings['movie_id']```
###Code
# @title CFModel helper class (run this cell)
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
      num_iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
optimizer: the optimizer to use. Default to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
metrics_vals = [collections.defaultdict(list) for _ in self._metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
###Output
_____no_output_____
###Markdown
Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses.
###Code
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
# ========================= Complete this section ============================
# A_train =
# A_test =
# ============================================================================
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# ============================================================================
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
###Output
_____no_output_____
###Markdown
Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings.
###Code
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
###Output
_____no_output_____
###Markdown
The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array.
###Code
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
# ========================= Complete this section ============================
# scores =
# ============================================================================
return scores
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
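# Quick sanity check on toy vectors (illustrative values, not from the dataset):
# the second item is a scaled copy of the query, so its cosine score is exactly 1.0,
# while its dot-product score grows with the item's norm.
_u = np.array([1., 2., 0.])
_V = np.array([[1., 0., 0.],
               [2., 4., 0.]])
print(compute_scores(_u, _V, DOT))     # [ 1. 10.]
print(compute_scores(_u, _V, COSINE))  # [~0.447, 1.0]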
###Output
_____no_output_____
###Markdown
Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
###Code
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=6):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
###Output
_____no_output_____
###Markdown
Your recommendations If you chose to input your recommendations, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest neighbors Let's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding Norm We can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
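As a minimal standalone sketch of this hypothesis (assuming the trained `model` and the `movies_ratings` DataFrame from earlier cells are available), the embedding norms can also be correlated with the rating counts directly:
```python
import numpy as np
norms = np.linalg.norm(model.embeddings["movie_id"], axis=1)
counts = movies_ratings['rating count'].values
print(np.corrcoef(norms, counts)[0, 1])  # a positive correlation supports the popularity/norm hypothesis
```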
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
    models: A MFModel object, or a list of MFModel objects.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
model: A MFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization. Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$. How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
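A quick standalone numerical check of that approximation:
```python
import numpy as np
sigma, d = 0.5, 30
samples = np.random.normal(0, sigma, size=(10000, d))
print(np.linalg.norm(samples, axis=1).mean())  # close to sigma * np.sqrt(d), i.e. about 2.74
```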
###Code
#@title Solution
model = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Embedding visualization Since it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower dimensional space. t-SNE (t-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/).
###Code
tsne_movie_embeddings(model)
###Output
_____no_output_____
###Markdown
You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres). We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix Factorization In the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*. We will add regularization terms that will address this issue. We will use two types of regularization: - Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$. - A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$. The total loss is then given by $$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda_r r(U, V) + \lambda_g g(U, V)$$ where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train it Write a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$. A plain-NumPy sketch of both regularizers is shown below.
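Before completing the exercise, it may help to see both regularizers spelled out in plain NumPy on small random matrices (a standalone sketch; the trace identity in the last lines is what the provided `gravity` function exploits):
```python
import numpy as np
N, M, d = 5, 7, 3
U, V = np.random.randn(N, d), np.random.randn(M, d)
r = (U**2).sum() / N + (V**2).sum() / M            # L2 term r(U, V)
g = ((U @ V.T)**2).mean()                          # gravity term g(U, V), direct definition
g_trace = np.sum((U.T @ U) * (V.T @ V)) / (N * M)  # same quantity via sum((U'U) * (V'V))
print(np.isclose(g, g_trace))                      # True
```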
###Code
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# error_train =
# error_test =
# gravity_loss =
# regularization_loss =
# ============================================================================
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error': error_train,
'test_error': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
# @title Solution
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
error_train = sparse_mean_square_error(A_train, U, V)
error_test = sparse_mean_square_error(A_test, U, V)
gravity_loss = gravity_coeff * gravity(U, V)
regularization_loss = regularization_coeff * (
tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error_observed': error_train,
'test_error_observed': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code
reg_model = build_regularized_model(
ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35,
init_stddev=.05)
reg_model.train(num_iterations=2000, learning_rate=20.)
###Output
_____no_output_____
###Markdown
Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the results Let's see if the results with regularization look better.
###Code
user_recommendations(reg_model, DOT, exclude_rated=True, k=10)
###Output
_____no_output_____
###Markdown
Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations. Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`. In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings.
###Code
movie_neighbors(reg_model, "Aladdin", DOT)
movie_neighbors(reg_model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously.
###Code
movie_embedding_norm([model, reg_model])
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
###Output
_____no_output_____
###Markdown
We should observe that the embeddings have a lot more structure than in the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). Conclusion This concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax model In this section, we will train a simple softmax model that predicts whether a given user has rated a movie. **Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part. The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features: - movie_id: A tensor of strings of the movie ids that the user rated. - genre: A tensor of strings of the genres of those movies. - year: A tensor of strings of the release years.
###Code
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
"""Selectes a random elements from each row of x."""
def to_float(x):
return tf.cast(x, tf.float32)
def to_int(x):
return tf.cast(x, tf.int64)
batch_size = tf.shape(x)[0]
rn = tf.range(batch_size)
nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
rnd = tf.random_uniform([batch_size])
ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss function Recall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product $$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$ Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model. In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, and target label $y$, and returns the cross-entropy loss. Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
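The loss can be prototyped in plain NumPy before writing the TensorFlow version (a standalone sketch with made-up shapes):
```python
import numpy as np
psi = np.random.randn(2, 4)        # batch of 2 user embeddings, d = 4
V = np.random.randn(6, 4)          # 6 movie embeddings
logits = psi @ V.T                 # shape [batch_size, num_movies]
p_hat = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
y = np.array([3, 0])               # target movie ids, one per example
loss = -np.log(p_hat[np.arange(2), y]).mean()  # mean cross-entropy
print(loss)
```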
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A sparse tensor of dense_shape [batch_size, 1], such that
labels[i] is the target label for example i.
Returns:
The mean cross-entropy loss.
"""
# ========================= Complete this section ============================
# logits =
# loss =
# ============================================================================
return loss
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
  # Verify that the embeddings have compatible dimensions
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings. We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_network`: the input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated. Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# test_precision =
# ============================================================================
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision": test_precision}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
_, test_precision = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision": test_precision}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax model We are now ready to train the softmax model. You can set the following hyperparameters: - learning rate - number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state. - input embedding dimensions (the `input_dims` argument) - number of hidden layers and size of each layer (the `hidden_dims` argument) Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddings We can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm([reg_model, softmax_model])
tsne_movie_embeddings(softmax_model)
###Output
_____no_output_____ |
np/np1.ipynb | ###Markdown
Random values
###Code
np.random.random(3) # an array of 3 random values between 0 and 1
np.random.random((3,4)) # 3*4 array of uniformly distributed values between 0 and 1
np.random.normal(0,10,(3,3)) # 3*3 array of normally distributed values with mean 0 and standard deviation 10
np.random.randn(10) # 1D array of 10 normally distributed values with mean 0 and standard deviation 1
np.random.randn(4,3) # 2D array of normally distributed values with mean 0 and standard deviation 1
# note: randn takes separate dimension arguments, while np.random.random((4,3)) takes a shape tuple and samples uniformly. Notice the difference!!
np.random.randint(0,20,(3,3)) # 3*3 array of random ints from 0 (inclusive) to 20 (exclusive)
# similar to np.random.randint(20,size=(3,3))
np.random.randint(0,20,10) # 1D array of 10 random ints from 0 (inclusive) to 20 (exclusive)
# similar to np.random.randint(20, size=10)
###Output
_____no_output_____
###Markdown
Other array creation routines
###Code
np.empty(4) # uninitialized array of length 4; contents are arbitrary until assigned
ar1=np.eye(4) # 4*4 identity matrix
ar1
###Output
_____no_output_____
###Markdown
*Fancy indexing*
###Code
ar_length=ar1.shape[1] #length of one row
ar_length
for i in range(ar_length):
ar1[i]=i
ar1
ar1[[2,0,3,1]] # Fancy indexing happens here
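# Boolean masks are another form of fancy indexing (illustrative):
ar1[ar1[:, 0] > 1] # selects rows 2 and 3, whose first element exceeds 1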
ar1.swapaxes(1,0) # swap axes 0 and 1 (the transpose for a 2D array)
np.random.seed(0)
x3 = np.random.randint(10, size=(3, 4, 5))
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
print("dtype:", x3.dtype)
print("itemsize:", x3.itemsize, "bytes")
print("nbytes:", x3.nbytes, "bytes")
###Output
x3 ndim: 3
x3 shape: (3, 4, 5)
x3 size: 60
dtype: int32
itemsize: 4 bytes
nbytes: 240 bytes
|
Demo_TARJANI.ipynb | ###Markdown
Installation: The following section walks you through the installation steps.
###Code
!git clone https://github.com/Rutvik-Trivedi/tarjani.git
%cd tarjani/scripts
!python complete_install.py
!pip install -r ../requirements.txt > logs.txt
import nltk
nltk.download('all')
from IPython import display
display.clear_output()
!python train_after_import.py
###Output
_____no_output_____
###Markdown
Running TARJANI: TARJANI has just been installed and does not have any intents yet, so it will not work as expected. To run TARJANI properly, please create an intent first. But for demo purposes, you may run the following interaction script to observe the behaviour of TARJANI.
###Code
!python interact.py
###Output
_____no_output_____
###Markdown
Creating a Simple Intent: This section walks you through creating a simple intent that asks TARJANI its name.
###Code
!python create_intent.py
!python interact.py
###Output
_____no_output_____
###Markdown
Deleting the intent:
###Code
!python delete_intent.py
!python interact.py
###Output
_____no_output_____
###Markdown
Creating an Intent with an Entity and a Skill: This section walks you through creating an intent with a user-defined entity. Let us create an intent to create a file with the given name.
###Code
!python create_intent.py
!python interact.py
###Output
_____no_output_____
###Markdown
Exporting this Model: Let us export the trained model so that we can download it to our system and use it anywhere. Format: ```bash ./export_model.sh [any_name_you_prefer_goes_here_without_any_extension] ```
###Code
!./export_model.sh create_file
###Output
_____no_output_____
###Markdown
Resetting a Model: If you want to reset the model to the initial blank version, you may use this:
###Code
!python reset_model.py
###Output
_____no_output_____
###Markdown
Importing a pre-built agent: Let us import the just-exported pre-built agent, which creates a file. A bunch of pre-built agents are available to download at http://tarjani.is-great.net. The format to use is: ```bash ./import_model.sh [path_of_the_model_to_import_without_extension] ```
###Code
!./import_model.sh create_file
###Output
_____no_output_____
###Markdown
Submitting Feedback: Thank you for viewing the demo of TARJANI. It would be great if you could try TARJANI and provide your feedback on its performance. It will help TARJANI improve a lot. Thank you!
###Code
!python submit_feedback.py
###Output
_____no_output_____
###Markdown
Using an Experimental Feature: TARJANI has developed a couple of experimental vision features which are available for trial. Please note that these features are still experimental and may be discontinued in case of poor performance. Please provide your feedback on their performance. This section shows how to use the experimental image captioning feature. Download the addon model: The addon model for this needs to be downloaded from [tarjani-model-zoo](https://github.com/Rutvik-Trivedi/tarjani-model-zoo)
###Code
!pip install 'git+git://github.com/HR/github-clone#egg=ghclone' #One time install
%cd /content/tarjani/model/vision
!ghclone https://github.com/Rutvik-Trivedi/tarjani-model-zoo/tree/main/image-captioning
# Commands for each models are available in the README file on the repository
###Output
_____no_output_____
###Markdown
Using image captioning from the bash shell: I have provided a sample image link for demo purposes: [Sample Image](https://www.alphashooters.com/wp-content/uploads/2019/01/sony-sel85f18-gsd-puppy-DSC00116-1300px.jpg) Downloading the image
###Code
%cd /content/tarjani/scripts/experimental
!wget https://www.alphashooters.com/wp-content/uploads/2019/01/sony-sel85f18-gsd-puppy-DSC00116-1300px.jpg
!mv sony-sel85f18-gsd-puppy-DSC00116-1300px.jpg test.jpg
###Output
_____no_output_____
###Markdown
Applying image captioning
###Code
!python vision.py --path test.jpg
###Output
_____no_output_____ |
Assignments/Python/CosineSimilarityAssignment.ipynb | ###Markdown
NAME: Shubham Kumar, PRN: 18030142032
###Code
import numpy as np
import pandas as pd
from numpy import dot
from numpy.linalg import norm
def cos_sim(a,b):
    # cosine similarity: dot(a, b) / (|a| * |b|)
    return (dot(a, b)/(norm(a)*norm(b)))
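# Quick check (illustrative): identical vectors score 1.0, orthogonal vectors 0.0
print(cos_sim(np.ones(3), np.ones(3))) # -> 1.0
print(cos_sim(np.array([1, 0]), np.array([0, 1]))) # -> 0.0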
# load the whole embedding into memory
embeddings_index = dict()
with open('./glove/glove.6B.300d.txt', encoding='UTF-8') as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
print('Loaded %s word vectors.' % len(embeddings_index))
###Output
Loaded 400000 word vectors.
###Markdown
Implement Cosine Similarity and find the top 10 nearest words to each of the following words: 1. wikipedia 2. software 3. jupiter 4. wise
###Code
# Generating Cosine Similarities for all given words and adding it to dataframe
dct = {t:[cos_sim(embeddings_index.get(t),embeddings_index.get(w))
for w in list(embeddings_index.keys())] for t in ['wikipedia','software','jupiter','wise']}
df = pd.DataFrame(dct,index=list(embeddings_index.keys()))
df.head()
print("Words similar to Wise: ",list(df.sort_values(['wise'],ascending=False)[1:11].index),end='\n\n')
print("Words similar to Software: ",list(df.sort_values(['software'],ascending=False)[1:11].index),end='\n\n')
print("Words similar to Jupiter: ",list(df.sort_values(['jupiter'],ascending=False)[1:11].index),end='\n\n')
print("Words similar to Wikipedia: ",list(df.sort_values(['wikipedia'],ascending=False)[1:11].index))
###Output
Words similar to Wise: ['foolish', 'prudent', 'smart', 'shrewd', 'advice', 'good', 'savvy', 'intelligent', 'sensible', 'thoughtful']
Words similar to Software: ['computer', 'microsoft', 'hardware', 'computers', 'internet', 'server', 'pc', 'users', 'systems', 'desktop']
Words similar to Jupiter: ['moons', 'planets', 'uranus', 'planet', 'saturn', 'neptune', 'mars', 'comet', 'io', 'orbits']
Words similar to Wikipedia: ['encyclopedia', 'wikimedia', 'wiki', 'facebook', 'blog', 'conservapedia', 'youtube', 'britannica', 'websites', 'blogs']
|
courses/machine_learning/deepdive/06_structured/5_train.ipynb | ###Markdown
Training on Cloud AI Platform This notebook illustrates distributed training and hyperparameter tuning on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. Train on Cloud ML Engine Training on Cloud ML Engine requires: (1) making the code a Python package, and (2) using gcloud to submit the training code to Cloud ML Engine. Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1 The following code edits babyweight/trainer/task.py. The package layout is sketched below.
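The package layout produced by the `%%writefile` cells below looks like this (the `__init__.py` is an assumption; some file must make `trainer` importable as a package):
```
babyweight/
  trainer/
    __init__.py   # assumed present so the trainer package can be imported
    task.py       # argument parsing; entry point invoked by gcloud
    model.py      # input functions, feature columns, train_and_evaluate
```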
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
    model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) // model.BATCH_SIZE  # integer division so max_steps is an int
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2 The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3 After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes during which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4 The JSON below represents an input to your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to verify that it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ml-engine local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5 Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186 The final RMSE was 1.03 pounds. Hyperparameter tuning All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config. This step will take up to 2 hours -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training This time we train with the tuned parameters (note the last line).
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud AI Platform This notebook illustrates distributed training on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI Platform Training on Cloud AI Platform requires: (1) making the code a Python package, and (2) using gcloud to submit the training code to Cloud AI Platform. Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1 The following code edits babyweight/trainer/task.py.
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
    model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) // model.BATCH_SIZE  # train_examples is given in thousands; integer division so max_steps is an int
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
def forward_features(estimator, key):
def new_model_fn(features, labels, mode, config):
spec = estimator.model_fn(features, labels, mode, config)
predictions = spec.predictions
predictions[key] = features[key]
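# EstimatorSpec is a namedtuple, so _replace returns a copy whose predictions
# dict now includes the key, passing it through to batch-prediction output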
spec = spec._replace(predictions=predictions)
return spec
return tf.estimator.Estimator(model_fn=new_model_fn, model_dir=estimator.model_dir, config=estimator.config)
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines, so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes, during which you won't see any output...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
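# put the local babyweight directory on PYTHONPATH so the 'trainer' package is importable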
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
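# gcloud consumes the flags before the bare '--'; everything after it is passed to trainer.task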
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=20000. When training finished, I filtered the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Repeat training: this time with tuned parameters (note the last line).
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=2000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud AI Platform. This notebook illustrates distributed training and hyperparameter tuning on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI Platform: training on Cloud AI Platform requires (1) making the code a Python package and (2) using gcloud to submit the training code to Cloud AI Platform. Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1: The following code edits babyweight/trainer/task.py; a short sketch of the package layout opens the next cell.
###Code
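# A minimal sketch (illustrative) of the package layout the cells below assume:
# an __init__.py is what makes 'trainer' an importable Python package.
from pathlib import Path
pkg = Path('babyweight/trainer')
pkg.mkdir(parents=True, exist_ok=True)
(pkg / '__init__.py').touch()
# task.py and model.py are then written by the %%writefile cells that follow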
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
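# N boundary values produce N+1 buckets, e.g. np.arange(15, 45, 1) gives 30 cut points -> 31 age buckets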
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
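# expand_dims reshapes each [None] placeholder to [None, 1], the rank the feature columns expect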
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
def forward_features(estimator, key):
def new_model_fn(features, labels, mode, config):
spec = estimator.model_fn(features, labels, mode, config)
predictions = spec.predictions
predictions[key] = features[key]
spec = spec._replace(predictions=predictions)
return spec
return tf.estimator.Estimator(model_fn=new_model_fn, model_dir=estimator.model_dir, config=estimator.config)
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines, so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes, during which you won't see any output...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Hyperparameter tuning: all of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it with --config. This step will take up to 2 hours -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
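  # the metric tag above must match the metric name returned by my_rmse in model.py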
goal: MINIMIZE
maxTrials: 10
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=2000
###Output
_____no_output_____
###Markdown
Repeat training: this time with tuned parameters (note the last line).
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=2000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud ML Engine. This notebook illustrates distributed training and hyperparameter tuning on Cloud ML Engine.
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. Train on Cloud ML Engine: training on Cloud ML Engine requires (1) making the code a Python package and (2) using gcloud to submit the training code to Cloud ML Engine. The code in model.py is the same as in the TensorFlow notebook; I just moved it to a file so that I could package it up as a module. (Explore the directory structure.)
###Code
%%bash
grep "^def" babyweight/trainer/model.py
###Output
_____no_output_____
###Markdown
After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines, so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes, during which you won't see any output...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ml-engine local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about an hour for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds.
###Code
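# Sanity check (illustrative): for squared-error training, average_loss is the
# mean squared error, so the logged rmse should equal sqrt(average_loss).
import math
print(math.sqrt(1.06473))  # ~1.03186, matching the rmse in the log line quoted above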
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET))
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
    print('Stopped TensorBoard with pid {}'.format(pid))
###Output
_____no_output_____
###Markdown
Hyperparameter tuning: all of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it with --config. This step will take 1 hour -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training: this time with tuned parameters (note the last line).
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud AI Platform. This notebook illustrates distributed training and hyperparameter tuning on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI Platform: training on Cloud AI Platform requires (1) making the code a Python package and (2) using gcloud to submit the training code to Cloud AI Platform. Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1: The following code edits babyweight/trainer/task.py.
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
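###Markdown
As a quick check of the step computation above (a minimal sketch using the defaults): train_examples is given in thousands, so the default of 5000 corresponds to 5,000,000 examples.
###Code
# Worked example for the defaults above: 5000 (thousands of examples) at batch_size 512
train_examples, batch_size = 5000, 512
print((train_examples * 1000) / batch_size)  # 9765.625 -> roughly 9.8k training steps
###Output
_____no_output_____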
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
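# note: tf.contrib was removed in TF 2.x; these contrib helpers exist in the TF 1.x runtime this copy targets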
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines, so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes, during which you won't see any output...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Hyperparameter tuning: all of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it with --config. This step will take up to 2 hours -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training: this time with tuned parameters (note the last line).
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud AI Platform. This notebook illustrates distributed training on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.5
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.5'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI Platform: training on Cloud AI Platform requires (1) making the code a Python package and (2) using gcloud to submit the training code to Cloud AI Platform. Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1: The following code edits babyweight/trainer/task.py.
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
def forward_features(estimator, key):
def new_model_fn(features, labels, mode, config):
spec = estimator.model_fn(features, labels, mode, config)
predictions = spec.predictions
predictions[key] = features[key]
spec = spec._replace(predictions=predictions)
return spec
return tf.estimator.Estimator(model_fn=new_model_fn, model_dir=estimator.model_dir, config=estimator.config)
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines, so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes, during which you won't see any output...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.4 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=20000. When training finished, I filtered the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Repeat training: this time with tuned parameters (note the last line).
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.4 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=2000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud ML Engine. This notebook illustrates distributed training and hyperparameter tuning on Cloud ML Engine.
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.8'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. Train on Cloud ML Engine: training on Cloud ML Engine requires (1) making the code a Python package and (2) using gcloud to submit the training code to Cloud ML Engine. Lab Task 1: The following code edits babyweight/trainer/task.py.
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes during which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
## This doesn't play nice with Python 3, so skipping for now.
##%bash
##MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
##echo $MODEL_LOCATION
##gcloud ml-engine local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
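%%bash
# Workaround sketch (assumes the standalone run above exported a SavedModel):
# saved_model_cli ships with TensorFlow and sidesteps the Python 3 issue by
# inspecting the exported serving signature directly.
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
saved_model_cli show --dir ${MODEL_LOCATION} --tag_set serve --signature_def serving_default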
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about an hour for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
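%%bash
# Monitoring sketch: list recent jobs and their states. To tail a specific
# job's logs use `gcloud ml-engine jobs stream-logs <JOBNAME>` -- note that
# JOBNAME does not persist across bash cells, so paste it from the echo above.
gcloud ml-engine jobs list --limit=5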
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds.
###Code
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET))
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print('Stopped TensorBoard with pid {}'.format(pid))
###Output
_____no_output_____
###Markdown
Hyperparameter tuning All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config. This step will take 1 hour -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
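# Illustration (hypothetical TF_CONFIG value; the real one is injected by the
# training service): during tuning, task.py appends the trial id from
# TF_CONFIG to output_dir so trials don't overwrite each other's checkpoints.
import json, os
os.environ['TF_CONFIG'] = json.dumps({'task': {'type': 'master', 'index': 0, 'trial': '7'}})
trial = json.loads(os.environ.get('TF_CONFIG', '{}')).get('task', {}).get('trial', '')
print(os.path.join('gs://{}/babyweight/hyperparam'.format(BUCKET), trial))
del os.environ['TF_CONFIG']  # clean up so later cells see a clean environment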
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training This time with tuned parameters (note last line)
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud ML Engine This notebook illustrates distributed training and hyperparameter tuning on Cloud ML Engine.
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.8'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. Train on Cloud ML Engine Training on Cloud ML Engine requires: (1) making the code a Python package, and (2) using gcloud to submit the training code to Cloud ML Engine. Lab Task 1: The following code edits babyweight/trainer/task.py.
###Code
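# Illustration (hypothetical argv, a separate cell added for clarity): flags
# placed after the bare `--` separator on the gcloud command line are passed
# straight through to argparse in trainer/task.py, and TRAIN_STEPS is derived
# from them like this.
import argparse
p = argparse.ArgumentParser()
p.add_argument('--train_examples', type=int, default=5000)
p.add_argument('--batch_size', type=int, default=512)
a = p.parse_args(['--train_examples', '2000', '--batch_size', '35'])
print('train steps:', a.train_examples * 1000 // a.batch_size)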
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes during which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
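%%bash
# Equivalent sketch using gcloud's local runner: it exercises the same
# packaging and flag plumbing as the cloud submission, so it catches module
# and path errors before you pay for a job.
gcloud ml-engine local train \
  --module-name=trainer.task \
  --package-path=$(pwd)/babyweight/trainer \
  -- \
  --bucket=${BUCKET} \
  --output_dir=babyweight_trained_local \
  --pattern="00000-of-" --train_examples=1 --eval_steps=1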
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
## This doesn't play nice with Python 3, so skipping for now.
##%bash
##MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
##echo $MODEL_LOCATION
##gcloud ml-engine local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about an hour for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds.
###Code
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET))
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print('Stopped TensorBoard with pid {}'.format(pid))
###Output
_____no_output_____
###Markdown
Hyperparameter tuning All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config. This step will take 1 hour -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
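%%bash
# Results sketch: when tuning finishes, each trial's hyperparameters and final
# rmse appear under trainingOutput.trials in the job description (JOBNAME does
# not persist across bash cells, so substitute the name echoed above):
# gcloud ml-engine jobs describe <JOBNAME> --format="json(trainingOutput.trials)"
gcloud ml-engine jobs list --limit=3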
###Output
_____no_output_____
###Markdown
Repeat training This time with tuned parameters (note last line)
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud AI Platform This notebook illustrates distributed training on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1
# change these to try this notebook out
BUCKET = 'cloud-trainingdemosml'
PROJECT = 'mlops-content'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://$BUCKET/babyweight/preproc; then
gsutil mb -l $REGION gs://$BUCKET
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://$BUCKET
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI Platform Training on Cloud AI Platform requires: (1) making the code a Python package, and (2) using gcloud to submit the training code to Cloud AI Platform. Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1: The following code edits babyweight/trainer/task.py.
###Code
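%%bash
# Sanity-check sketch: confirm the AI Platform API (ml.googleapis.com) is
# enabled for this project before submitting any jobs.
if gcloud services list --enabled | grep -q ml.googleapis.com; then
  echo "AI Platform API is enabled"
else
  echo "Enable it at https://console.developers.google.com/apis/library/ml.googleapis.com"
fi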
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 100) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
def forward_features(estimator, key):
def new_model_fn(features, labels, mode, config):
spec = estimator.model_fn(features, labels, mode, config)
predictions = spec.predictions
predictions[key] = features[key]
spec = spec._replace(predictions=predictions)
return spec
return tf.estimator.Estimator(model_fn=new_model_fn, model_dir=estimator.model_dir, config=estimator.config)
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes during which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200
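%%bash
# Monitoring sketch: list recent jobs; to tail a specific job's logs use
# `gcloud ai-platform jobs stream-logs <JOBNAME>` (JOBNAME does not persist
# across bash cells, so copy it from the submission output above).
gcloud ai-platform jobs list --limit=5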
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=200. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Repeat training This time with tuned parameters (note last line)
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20 --batch_size=5 --nembeds=8 --nnsize=28
###Output
_____no_output_____
###Markdown
Training on Cloud ML Engine This notebook illustrates distributed training and hyperparameter tuning on Cloud ML Engine.
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import tensorflow as tf
print(tf.__version__)
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.8'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. Train on Cloud ML Engine Training on Cloud ML Engine requires: (1) making the code a Python package, and (2) using gcloud to submit the training code to Cloud ML Engine. Lab Task 1: The following code edits babyweight/trainer/task.py.
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3: After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop.) Even then, this takes about 3 minutes during which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4: The JSON below represents an input into your prediction model. Write the inputs.json file with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ml-engine local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5: Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about an hour for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds.
###Code
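%%bash
# Log-filtering sketch (assumes the Cloud Logging API is reachable from this
# account): pull the "Saving dict" evaluation lines for ML Engine jobs
# directly, instead of filtering in the Stackdriver console UI.
gcloud logging read 'resource.type="ml_job" AND textPayload:"Saving dict"' \
  --limit 5 --format="value(textPayload)"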
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET))
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print('Stopped TensorBoard with pid {}'.format(pid))
###Output
_____no_output_____
###Markdown
Hyperparameter tuning All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config. This step will take 1 hour -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training This time with tuned parameters (note last line)
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud AI Platform This notebook illustrates distributed training on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.5
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.5'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI Platform Training on Cloud AI Platform requires: (1) making the code a Python package, and (2) using gcloud to submit the training code to Cloud AI Platform. Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1: The following code edits babyweight/trainer/task.py.
###Code
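# Compatibility note as runnable code (an illustrative addition): this copy of
# the lab pins TF1-style graph behavior on a TF 2.x runtime via the compat.v1
# shim, which is why the Estimator/placeholder code below runs unchanged.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
print(tf.__version__, '| v2 behavior disabled:', not tf.executing_eagerly())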
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 100) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2: The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
def forward_features(estimator, key):
def new_model_fn(features, labels, mode, config):
spec = estimator.model_fn(features, labels, mode, config)
predictions = spec.predictions
predictions[key] = features[key]
spec = spec._replace(predictions=predictions)
return spec
return tf.estimator.Estimator(model_fn=new_model_fn, model_dir=estimator.model_dir, config=estimator.config)
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
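###Markdown
As a quick sanity check of the CSV parsing used inside read_dataset, the following standalone sketch feeds a hand-written sample line (made up to match the preprocessed file layout) through the same decode_csv logic:
###Code
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
line = tf.constant('7.25,True,26.0,Single(1),39.0,b1')  # hand-written sample row
columns = tf.decode_csv(line, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop('weight_pounds')
with tf.Session() as sess:
    print(sess.run(features))  # {'is_male': b'True', 'mother_age': 26.0, ...}
    print(sess.run(label))     # 7.25
###Output
_____no_output_____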
###Markdown
Lab Task 3After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about 3 minutes in which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
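###Markdown
For reference, python -m trainer.task works above because PYTHONPATH points at the babyweight directory and trainer is a package. A minimal sketch to list the layout (the __init__.py is assumed to be part of the repo rather than written by a cell here):
###Code
import os
for root, _, files in os.walk('babyweight'):
    for name in sorted(files):
        print(os.path.join(root, name))
# Expected, roughly:
#   babyweight/trainer/__init__.py
#   babyweight/trainer/model.py
#   babyweight/trainer/task.py
###Output
_____no_output_____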
###Markdown
Lab Task 4The JSON below represents an input into your prediction model. Write the inputs.json file below with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
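###Markdown
A side note on why local predict accepts the flat JSON rows above: serving_input_fn receives each field as a batch of scalars (shape [None]) and expands it to [None, 1] before the feature columns see it. A tiny standalone sketch of just that reshaping, with a fake two-row batch:
###Code
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
mother_age = tf.placeholder(tf.float32, [None])  # one scalar per instance
expanded = tf.expand_dims(mother_age, -1)        # shape [None, 1]
with tf.Session() as sess:
    print(sess.run(expanded, feed_dict={mother_age: [26.0, 26.0]}))  # [[26.], [26.]]
###Output
_____no_output_____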
###Markdown
Lab Task 5Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.4 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=20000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Repeat training This time with tuned parameters (note last line)
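Before that, a quick arithmetic check on the log line quoted above: the training loss is mean squared error, so the reported rmse should be the square root of average_loss.
###Code
import math
# Square root of the reported average_loss reproduces the reported rmse.
print(math.sqrt(1.06473))  # ~1.03186
###Output
_____no_output_____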
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.4 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=2000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
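###Markdown
One detail worth noting about the tuned flags above: nnsize is declared with nargs='+' in task.py, and the tuning service passes a single integer, so the parsed value is a one-element list, i.e. a single hidden layer of 281 units. A minimal sketch reproducing just that parsing (parser lines copied from task.py):
###Code
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--nembeds', type=int, default=3)
parser.add_argument('--nnsize', nargs='+', type=int, default=[128, 32, 4])
args = parser.parse_args('--batch_size=35 --nembeds=16 --nnsize=281'.split())
print(args.batch_size, args.nembeds, args.nnsize)  # 35 16 [281]
###Output
_____no_output_____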
###Markdown
Training on Cloud ML Engine This notebook illustrates distributed training and hyperparameter tuning on Cloud ML Engine.
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7'
%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. Train on Cloud ML Engine Training on Cloud ML Engine requires: Making the code a Python package Using gcloud to submit the training code to Cloud ML EngineThe code in model.py is the same as in the TensorFlow notebook. I just moved it to a file so that I could package it up as a module. (Explore the directory structure.)
###Code
%bash
grep "^def" babyweight/trainer/model.py
###Output
_____no_output_____
###Markdown
After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about 3 minutes in which you won't see any output ...
###Code
%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ml-engine local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about an hour for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
###Code
%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds.
###Code
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET))
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print('Stopped TensorBoard with pid {}'.format(pid))
###Output
_____no_output_____
###Markdown
Hyperparameter tuning All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config. This step will take 1 hour -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training This time with tuned parameters (note last line)
###Code
%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud AI PlatformThis notebook illustrates distributed training on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1
# change these to try this notebook out
BUCKET = 'gcpvijay04'
PROJECT = 'qwiklabs-gcp-01-974853e7c436'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
gs://gcpvijay04/babyweight/preproc/eval.csv-00000-of-00012
gs://gcpvijay04/babyweight/preproc/train.csv-00000-of-00043
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI PlatformTraining on Cloud AI Platform requires: Making the code a Python package Using gcloud to submit the training code to Cloud AI PlatformEnsure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1The following code edits babyweight/trainer/task.py.
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 100) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
Overwriting babyweight/trainer/task.py
###Markdown
Lab Task 2The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
def forward_features(estimator, key):
def new_model_fn(features, labels, mode, config):
spec = estimator.model_fn(features, labels, mode, config)
predictions = spec.predictions
predictions[key] = features[key]
spec = spec._replace(predictions=predictions)
return spec
return tf.estimator.Estimator(model_fn=new_model_fn, model_dir=estimator.model_dir, config=estimator.config)
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
Overwriting babyweight/trainer/model.py
###Markdown
Lab Task 3After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about 3 minutes in which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
bucket=gcpvijay04
Will train for 0.1953125 steps using batch_size=512
Will use DNN size of [128, 32, 4]
###Markdown
Lab Task 4The JSON below represents an input into your prediction model. Write the inputs.json file below with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
PREDICTIONS
[-0.8255363702774048]
[-0.914253294467926]
###Markdown
Lab Task 5Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000
###Output
gs://gcpvijay04/babyweight/trained_model us-central1 babyweight_210103_225636
jobId: babyweight_210103_225636
state: QUEUED
###Markdown
When I ran it, I used train_examples=20000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Repeat training This time with tuned parameters (note last line)
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=2.1 \
--python-version=3.7 \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=2000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
gs://gcpvijay04/babyweight/trained_model_tuned us-central1 babyweight_210103_232659
jobId: babyweight_210103_232659
state: QUEUED
###Markdown
Training on Cloud AI PlatformThis notebook illustrates distributed training and hyperparameter tuning on Cloud AI Platform (formerly known as Cloud ML Engine).
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.13'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform. Train on Cloud AI PlatformTraining on Cloud AI Platform requires: Making the code a Python package Using gcloud to submit the training code to Cloud AI PlatformEnsure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com). Lab Task 1The following code edits babyweight/trainer/task.py.
###Code
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2The following code edits babyweight/trainer/model.py.
###Code
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about 3 minutes in which you won't see any output ...
###Code
%%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4The JSON below represents an input into your prediction model. Write the inputs.json file below with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
echo $MODEL_LOCATION
gcloud ai-platform local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5Once the code works in standalone mode, you can run it on Cloud AI Platform. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds. Hyperparameter tuning All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config. This step will take up to 2 hours -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training This time with tuned parameters (note last line)
###Code
%%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____
###Markdown
Training on Cloud ML Engine This notebook illustrates distributed training and hyperparameter tuning on Cloud ML Engine.
###Code
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.8'
%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/babyweight/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight gs://${BUCKET}
fi
%bash
gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
###Output
_____no_output_____
###Markdown
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud ML Engine. Train on Cloud ML Engine Training on Cloud ML Engine requires: Making the code a Python package Using gcloud to submit the training code to Cloud ML Engine Lab Task 1The following code edits babyweight/trainer/task.py.
###Code
%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODO 1: add the new arguments here
parser.add_argument(
'--train_examples',
help = 'Number of examples (in thousands) to run the training job over. If this is more than actual # of examples available, it cycles through them. So specifying 1000 here when you have only 100k examples makes this 10 epochs.',
type = int,
default = 5000
)
parser.add_argument(
'--pattern',
help = 'Specify a pattern that has to be in input files. For example 00001-of will process only one shard',
default = 'of'
)
parser.add_argument(
'--eval_steps',
help = 'Positive number of steps for which to evaluate model. Default to None, which means to evaluate until input_fn raises an end-of-input exception',
type = int,
default = None
)
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 1000) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
###Output
_____no_output_____
###Markdown
Lab Task 2The following code edits babyweight/trainer/model.py.
###Code
%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
BUCKET = None # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None
BATCH_SIZE = 512
NEMBEDS = 3
NNSIZE = [64, 16, 4]
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Use prefix to create file path
file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
# Create list of files that match pattern
file_list = tf.gfile.Glob(file_path)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Define feature columns
def get_wide_deep():
# Define column types
is_male,mother_age,plurality,gestation_weeks = \
[\
tf.feature_column.categorical_column_with_vocabulary_list('is_male',
['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
tf.feature_column.categorical_column_with_vocabulary_list('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
# Discretize
age_buckets = tf.feature_column.bucketized_column(mother_age,
boundaries=np.arange(15,45,1).tolist())
gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks,
boundaries=np.arange(17,47,1).tolist())
# Sparse columns are wide, have a linear relationship with the output
wide = [is_male,
plurality,
age_buckets,
gestation_buckets]
# Feature cross all the wide columns and embed into a lower dimension
crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
embed = tf.feature_column.embedding_column(crossed, NEMBEDS)
# Continuous columns are deep, have a complex relationship with the output
deep = [mother_age,
gestation_weeks,
embed]
return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None]),
KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
pred_values = predictions['predictions']
return {'rmse': tf.metrics.root_mean_squared_error(labels, pred_values)}
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
wide, deep = get_wide_deep()
EVAL_INTERVAL = 300 # seconds
## TODO 2a: set the save_checkpoints_secs to the EVAL_INTERVAL
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
## TODO 2b: change the dnn_hidden_units to NNSIZE
estimator = tf.estimator.DNNLinearCombinedRegressor(
model_dir = output_dir,
linear_feature_columns = wide,
dnn_feature_columns = deep,
dnn_hidden_units = NNSIZE,
config = run_config)
# illustrates how to add an extra metric
estimator = tf.contrib.estimator.add_metrics(estimator, my_rmse)
# for batch prediction, you need a key associated with each instance
estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
## TODO 2c: Set the third argument of read_dataset to BATCH_SIZE
## TODO 2d: and set max_steps to TRAIN_STEPS
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
## TODO 2e: Lastly, set steps equal to EVAL_STEPS
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15), # no need to batch in eval
steps = EVAL_STEPS,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
###Output
_____no_output_____
###Markdown
Lab Task 3After moving the code to a package, make sure it works standalone. (Note the --pattern and --train_examples lines so that I am not trying to boil the ocean on my laptop). Even then, this takes about 3 minutes in which you won't see any output ...
###Code
%bash
echo "bucket=${BUCKET}"
rm -rf babyweight_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/babyweight
python -m trainer.task \
--bucket=${BUCKET} \
--output_dir=babyweight_trained \
--job-dir=./tmp \
--pattern="00000-of-" --train_examples=1 --eval_steps=1
###Output
_____no_output_____
###Markdown
Lab Task 4The JSON below represents an input into your prediction model. Write the inputs.json file below with the next cell, then run the prediction locally to assess whether it produces predictions correctly.
###Code
%writefile inputs.json
{"key": "b1", "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"key": "g1", "is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
## This doesn't play nice with Python 3, so skipping for now.
##%bash
##MODEL_LOCATION=$(ls -d $(pwd)/babyweight_trained/export/exporter/* | tail -1)
##echo $MODEL_LOCATION
##gcloud ml-engine local predict --model-dir=$MODEL_LOCATION --json-instances=inputs.json
###Output
_____no_output_____
###Markdown
Lab Task 5Once the code works in standalone mode, you can run it on Cloud ML Engine. Because this is on the entire dataset, it will take a while. The training run took about two hours for me. You can monitor the job from the GCP console in the Cloud Machine Learning Engine section.
###Code
%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=200000
###Output
_____no_output_____
###Markdown
When I ran it, I used train_examples=2000000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was: Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186. The final RMSE was 1.03 pounds.
###Code
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/babyweight/trained_model'.format(BUCKET))
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print('Stopped TensorBoard with pid {}'.format(pid))
###Output
_____no_output_____
###Markdown
Hyperparameter tuning All of these are command-line parameters to my program. To do hyperparameter tuning, create hyperparam.yaml and pass it as --config. This step will take up to 2 hours -- you can increase maxParallelTrials or reduce maxTrials to get it done faster. Since maxParallelTrials is the number of initial seeds to start searching from, you don't want it to be too large; otherwise, all you have is a random search.
###Code
%writefile hyperparam.yaml
trainingInput:
scaleTier: STANDARD_1
hyperparameters:
hyperparameterMetricTag: rmse
goal: MINIMIZE
maxTrials: 20
maxParallelTrials: 5
enableTrialEarlyStopping: True
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 512
scaleType: UNIT_LOG_SCALE
- parameterName: nembeds
type: INTEGER
minValue: 3
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: nnsize
type: INTEGER
minValue: 64
maxValue: 512
scaleType: UNIT_LOG_SCALE
%bash
OUTDIR=gs://${BUCKET}/babyweight/hyperparam
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--config=hyperparam.yaml \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--eval_steps=10 \
--train_examples=20000
###Output
_____no_output_____
###Markdown
Repeat training This time with tuned parameters (note last line)
###Code
%bash
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/babyweight/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--output_dir=${OUTDIR} \
--train_examples=20000 --batch_size=35 --nembeds=16 --nnsize=281
###Output
_____no_output_____ |
- Week4 - Approximate Q-learning ( DQN ...)/.ipynb_checkpoints/tme 4-checkpoint.ipynb | ###Markdown
Libraries
###Code
import matplotlib
matplotlib.use("TkAgg")
import gym
import gridworld
from gym import wrappers, logger
import numpy as np
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
np.random.seed(3)
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
###Output
_____no_output_____
###Markdown
Utils
###Code
################### Highly abstract policy class! #######################
class Policy(object):
def __init__(self):
""" Nothing is required for to construct an abstract policy class """
pass
def get_action_value(self):
"Must be redefined"
raise NotImplementedError
############################################################################
################### Uniform #######################
class Uniform_Policy(Policy):
def get_action_value(self , Q_state ):
""" Q_values numpy array for a given state size ( number of actions ) """
action = np.random.randint (Q_state.size)
return action , Q_state[action]
############################################################################
################### Greedy #######################
class Greedy_Policy(Policy):
def get_action_value(self , Q_state):
action = Q_state.argmax()
return action , Q_state[action]
############################################################################
################### ε-Greedy #######################
class Epsilon_Greedy_Policy(Policy):
def __init__(self , eps=0.1 , decay=1.0 ):
self.eps = eps
self.decay = decay
def get_action_value(self , Q_state ):
self.eps = self.eps*self.decay
if ( np.random.rand() <= (self.eps) ):
action = np.random.choice ( Q_state.size )
else :
action = Q_state.argmax()
return action , Q_state[action]
def set_epsilon(self,eps):
"""
        when we want to perform a customized decay manually
"""
self.eps = eps
############################################################################
def make_policy ( name="Epsilon_Greedy" , params={ "eps":0.1 , "decay":1.0 } ) :
if ( name=="Uniform" ): return Uniform_Policy()
elif ( name=="Greedy" ) : return Greedy_Policy()
elif ( name=="Epsilon_Greedy" ) : return Epsilon_Greedy_Policy(**params)
else :
raise Exception("Unknown policy")
p = make_policy( name="Epsilon_Greedy" )
# class DQN(nn.Module):
# def __init__(self, in_channels=4, num_actions=2):
# """
# DQN
# """
# super(DQN, self).__init__()
# self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# self.fc4 = nn.Linear(7 * 7 * 64, 512)
# self.fc5 = nn.Linear(512, num_actions)
# def forward(self, x):
# x = F.relu(self.conv1(x))
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.fc4(x.view(x.size(0), -1)))
# return self.fc5(x)
###Output
_____no_output_____
###Markdown
Algorithms DQN ( Experience replay + target Network )
###Code
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.curr_state = torch.Tensor().type(torch.float64)
self.next_state = torch.Tensor().type(torch.float64)
self.action = torch.Tensor().type(torch.uint8)
self.reward = torch.Tensor().type(torch.float64)
self.final_state_mask = []
self.position = 0
def push(self, Phi_S, action, Phi_next_S, reward, done):
"""Saves a transition."""
act = torch.tensor([[action]],dtype=torch.uint8)
if len(self) < self.capacity:
self.curr_state = torch.cat( [self.curr_state,Phi_S.view(1,-1)] ,0)
self.next_state = torch.cat( [self.next_state,Phi_next_S.view(1,-1)] ,0)
self.action = torch.cat([self.action,act], 0)
self.reward = torch.cat([self.reward,torch.tensor([reward],dtype=torch.float64).view(1,-1)], 0)
self.final_state_mask.append(not done)
else :
self.curr_state[self.position] = Phi_S
self.next_state[self.position] = Phi_next_S
self.action[self.position] = act
self.reward[self.position] = reward
            self.final_state_mask[self.position] = not done  # keep the "not final" convention used when appending
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
indexes = np.random.choice(len(self),size=batch_size)
return self.curr_state[indexes], self.action[indexes],\
self.next_state[indexes], self.reward[indexes],\
torch.tensor( self.final_state_mask ) [indexes]
def __len__(self):
return len(self.curr_state)
class DQN_Q_Estimator(nn.Module):
"""
Simple Neural Network Q(state) ==> [ Q[state,action1], Q[state,action2], ... ]
"""
def __init__(self , in_size=4 , num_actions=2 , hidden_size=[] ):
super(DQN_Q_Estimator, self).__init__()
self.layers = nn.ModuleList([])
for x in hidden_size:
self.layers.append(nn.Linear(in_size, x))
in_size = x
self.layers.append(nn.Linear(in_size, num_actions))
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
def forward(self, x):
x = self.layers[0](x)
for i in range(1, len(self.layers)):
x = torch.nn.functional.relu(x)
x = self.layers[i](x)
return x
class CartPoleAgent(object):
"""The world's simplest agent!"""
    def __init__(self, action_space, behavior_policy, memory_capacity=1000, hidden_size=[50], device=device):
self.action_space = action_space
self.replay_memory = ReplayMemory(memory_capacity)
self.Q_estimator_policy= DQN_Q_Estimator(in_size=4,num_actions=action_space.n,hidden_size=hidden_size)\
.double().to(device)
# copy from policy net
self.Q_estimator_target= DQN_Q_Estimator(in_size=4,num_actions=action_space.n,hidden_size=hidden_size)\
.double().to(device)
self.behavior_policy = behavior_policy
self.update_target_network()
def act(self, observation):
output = self.Q_estimator_policy(observation.to(device)).detach().cpu().numpy()
action, _= self.behavior_policy.get_action_value(output)
return action
def update_target_network(self):
"copy policy network parameters to target network parameters"
self.Q_estimator_target.load_state_dict(self.Q_estimator_policy.state_dict())
p = make_policy ( "Epsilon_Greedy" , params={ "eps" : EPS } )
agent = CartPoleAgent( envm.action_space, behavior_policy=p , memory_capacity=MEM_CAPACITY,
hidden_size=HIDDEN_SIZE )
env = gym.make('CartPole-v1')
cartpole_nbr_actions = []
outdir = 'cartpole-v0/CartPol-agent-results'
envm = wrappers.Monitor(env, directory=outdir, force=True, video_callable=False)
env.seed(0)
##################### hyper params #####################
episode_count = 500
env.verbose = True
np.random.seed(0)
rsum = 0
BATCH_SIZE = 32
LR = 0.001
EPS = 0.2
GAMMA = 0.99
TARGET_UPDATE = 100
MEM_CAPACITY = 1000
HIDDEN_SIZE = [128]
log_interval = 10
#########################################################
################## AGENT ##################
p = make_policy ( "Epsilon_Greedy" , params={ "eps" : EPS, "decay":0.9999 } )
agent = CartPoleAgent( envm.action_space, behavior_policy=p , memory_capacity=MEM_CAPACITY,
hidden_size=HIDDEN_SIZE )
###########################################
################# LOSS + optimizer ################
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam ( agent.Q_estimator_policy.parameters() , lr=LR )
###################################################
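# --- Note on the learning target (standard DQN, matching the loop below) ---
# Each minibatch regresses Q_policy(s, a) toward the bootstrapped target
#     y = r + not_final * GAMMA * max_a' Q_target(s', a'),
# so terminal transitions contribute y = r only; the boolean mask returned
# by ReplayMemory.sample implements exactly this.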
nbr_steps = 0
for i in range(episode_count):
obs = envm.reset()
    env.verbose = (i % 100 == 0 and i > 0)  # render one episode out of 100
if env.verbose:
env.render()
j = 0
rsum = 0
Phi_S = torch.from_numpy(obs)
while True:
action = agent.act(Phi_S)
j+=1
obs, reward, done, _ = envm.step(action)
Phi_next_S = torch.from_numpy(obs)
if done:
reward = -1
# Store the transition in memory
        agent.replay_memory.push(Phi_S, action, Phi_next_S, reward, done)
        Phi_S = Phi_next_S  # advance the state before choosing the next action
if ( len(agent.replay_memory) >= MEM_CAPACITY ):
# get X_batch
state_batch, action_batch, next_state_batch, reward_batch, not_final_state_mask = \
agent.replay_memory.sample(BATCH_SIZE)
# create Y_batch
expected_Q = reward_batch.to(device).reshape(-1)
            next_Q = agent.Q_estimator_target(next_state_batch.to(device)).max(1)[0].detach()  # no gradients through the target net
            expected_Q += not_final_state_mask.to(device) * GAMMA * next_Q
# Forward pass
current_Q = agent.Q_estimator_policy(state_batch.to(device))
current_Q = current_Q[ list(range(BATCH_SIZE)) , action_batch.reshape(-1).tolist() ]
loss = criterion(current_Q, expected_Q)
            # Backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
nbr_steps += 1
        if nbr_steps % TARGET_UPDATE == 0:  # sync the target network periodically
            agent.update_target_network()
if(done):
if i % log_interval == 0:
print('episode {}| nbr_actions: {}| epsilon: {:.2f}'.format(i, j, agent.behavior_policy.eps))
break
# # logs
# rsum += reward
# j += 1
# if env.verbose:
# env.render()
# if done:
# cartpole_nbr_actions.append(j)
# print("Episode : " + str(i) + " rsum=" + str(rsum) + ", " + str(j) + " actions")
# break
print("done")
env.close()
agent.Q_estimator_policy
###Output
_____no_output_____ |
8-Labs/Lab11/old_src/Lab11-TH.ipynb | ###Markdown
**Download** (right-click, save target as ...) this page as a jupyterlab notebook from: [Lab11-TH](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab11/Lab11-TH.ipynb)___ Laboratory 11: Dataframes (Continued)LAST NAME, FIRST NAMER00000000ENGR 1330 Laboratory 11 - Homework Preamble script block to identify host, user, and kernel --- Exercise 1: Follow the steps below:1. STEP 0: install necessary libraries (numpy and pandas)2. STEP 1: There are 8 digits in your R-number. Define a 2x4 array with these 8 digits, name it "Rarray", and print it (a sketch with placeholder digits follows the code cell below)
###Code
# step 0 and step 1 go here!
###Output
_____no_output_____
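###Markdown
A minimal sketch for steps 0-1 (the digits below are placeholders, not a solution -- substitute the 8 digits of your own R-number):
###Code
# Hypothetical example only; replace the digits with your own R-number.
# import numpy as np
# Rarray = np.array([1, 2, 3, 4, 5, 6, 7, 8]).reshape(2, 4)
# print(Rarray)
###Output
_____no_output_____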
###Markdown
3. STEP 2: Find the maximum value of the "Rarray" and its position
###Code
# step 2 goes here!
###Output
_____no_output_____
###Markdown
4. STEP 3: Sort the "Rarray" along the rows, store it in a new array named "Rarraysort", and print the new array out
###Code
# step 3 goes here!
###Output
_____no_output_____
###Markdown
5. STEP 4: Define and print a 4x4 array that has the "Rarray" as its two first rows, and "Rarraysort" as its next rows. Name this new array "DoubleRarray"
###Code
# step 4 goes here!
###Output
_____no_output_____
###Markdown
6. STEP 5: Slice and print a 4x3 array from the "DoubleRarray" that contains the last three columns of it. Name this new array "SliceRarray".
###Code
# step 5 goes here!
###Output
_____no_output_____
###Markdown
7. STEP 6: Define the "SliceRarray" as a panda dataframe: - name it "Rdataframe", - name the rows as "Row A","Row B","Row C", and "Row D" - name the columns as "Column 1", "Column 2", and "Column 3"
###Code
# step 6 goes here!
###Output
_____no_output_____
###Markdown
8. STEP 7: Print the first few rows of the "Rdataframe".
###Code
# step 7 goes here!
###Output
_____no_output_____
###Markdown
9. STEP 8: Create a new dataframe object ("R2dataframe") by adding a column to the "Rdataframe", name it "Column X" and fill it with "None" values. Then, use the appropriate descriptor function and print the data model (data column count, names, data types) of the "R2dataframe"
###Code
# step 8 goes here!
###Output
_____no_output_____
###Markdown
10. STEP 9: Replace the **'None'** in the "R2dataframe" with 0. Then, print the summary statistics of each numeric column in the data frame.
###Code
# step 9 goes here!
###Output
_____no_output_____
###Markdown
11. STEP 10: Define a function based on the equation below: $$ y = x^2 - 5x +7 $$apply the function to the entire "R2dataframe", store the results in a new dataframe ("R3dataframe"), and print the results and the summary statistics again.
###Code
# step 10 goes here!
###Output
_____no_output_____
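###Markdown
One possible shape for step 10 (a hedged sketch; it assumes the numeric dataframe built in step 9):
###Code
# Hypothetical sketch only:
# def f(x):
#     return x**2 - 5*x + 7
# R3dataframe = R2dataframe.applymap(f)
# print(R3dataframe.describe())
###Output
_____no_output_____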
###Markdown
12. STEP 11: Print the number of occurrences of each unique value in "Column 3"
###Code
# step 11 goes here!
###Output
_____no_output_____
###Markdown
13. STEP 12: Sort the data frame with respect to "Column 1" with a descending order and print it
###Code
# step 12 goes here!
###Output
_____no_output_____
###Markdown
14. STEP 13: Write the final format of the "R3dataframe" on a CSV file, named "Rfile.csv"
###Code
# step 13 goes here!
###Output
_____no_output_____
###Markdown
15. STEP 14: Read the "Rfile.csv" and print its content. __Make sure to attach the "Rfile.csv" file to your submission.__
###Code
# step 14 goes here!
###Output
_____no_output_____
###Markdown
--- Exercise 2: Precipitation records for Lubbock from 1895 to 2019 for the month of October is located in the file http://54.243.252.9/engr-1330-webroot/4-Databases/Lubbockdata.csv. Using this file:1. Download the file and read it into a dataframe2. Describe the entire data set. [Script]3. Produce a plot of year vs precipitation. [Script + Plot 1: data==blue]4. Split the data into two parts at the year 1960. [Script]5. Describe the two data series you have created. [Script]6. Plot the two series on the same plot. [Script + Plot 2: data1==blue, data2==green]
###Code
#Download the file and read it into a dataframe [Script]
#Describe the entire data set. [Script]
#Produce a plot of year vs precipitation. [Script + Plot 1: data==blue]
#Split the data into two parts at the year 1960. [Script]
#Describe the two data series you have created. [Script]
#Plot the two series on the same plot. [Script + Plot 2: data1==blue, data2==green]
###Output
_____no_output_____ |
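###Markdown
A starting sketch for Exercise 2 (hedged: the column names must be checked against the actual header of Lubbockdata.csv before use):
###Code
# Hypothetical sketch only -- verify the column names in the CSV first.
# import pandas as pd
# df = pd.read_csv('http://54.243.252.9/engr-1330-webroot/4-Databases/Lubbockdata.csv')
# print(df.describe())
# early, late = df[df['YEAR'] < 1960], df[df['YEAR'] >= 1960]
###Output
_____no_output_____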
notebooks/pca_corona_evolution.ipynb | ###Markdown
Run in Google Colab
###Code
import sys
import sklearn
import os
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import urllib.request
from sklearn import preprocessing
from sklearn import pipeline
from ipywidgets import interactive
import ipywidgets as widgets
corona_dataset_path = os.path.join("corona_dataset", "")
download_path = "https://raw.githubusercontent.com/AbdelMahm/CORONA/master/"
os.makedirs(corona_dataset_path, exist_ok=True)
for filename in ("time_series_19-covid-Confirmed.csv", "time_series_19-covid-Deaths.csv", "time_series_19-covid-Recovered.csv"):
print("Downloading", filename)
url = download_path + "corona_dataset/" + filename
urllib.request.urlretrieve(url, corona_dataset_path + filename)
df_confirmed = pd.read_csv(corona_dataset_path + "time_series_19-covid-Confirmed.csv", thousands=',')
df_deaths = pd.read_csv(corona_dataset_path + "time_series_19-covid-Deaths.csv", thousands=',')
df_recovered = pd.read_csv(corona_dataset_path + "time_series_19-covid-Recovered.csv", thousands=',')
df_confirmed.head()
df_deaths.head()
df_recovered.head()
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def pca_move(day, country, compare_with, constant_size):
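    # Note: for the chosen day index this helper standardises the first
    # day+1 daily columns of each frame, projects all countries onto two
    # principal components, and scatters confirmed/recovered/deaths together;
    # the +0/+20/+40 offsets below merely separate the three clouds horizontally.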
X = df_confirmed.iloc[:, 4:4+day+1]
X = StandardScaler().fit_transform(X)
pca = PCA(n_components=2)
principal_components_confirmed = pca.fit_transform(X)
df_principal_components_confirmed = pd.DataFrame(data = principal_components_confirmed
, columns = ['PC1', 'PC2'])
#print(pca.explained_variance_ratio_)
    # Recovered
X = df_recovered.iloc[:, 4:4+day+1]
X = StandardScaler().fit_transform(X)
pca = PCA(n_components=2)
principal_components_recovered = pca.fit_transform(X)
df_principal_components_recovered = pd.DataFrame(data = principal_components_recovered
, columns = ['PC1', 'PC2'])
#Deaths
X = df_deaths.iloc[:, 4:4+day+1]
X = StandardScaler().fit_transform(X)
pca = PCA(n_components=2)
principal_components_deaths = pca.fit_transform(X)
df_principal_components_deaths= pd.DataFrame(data = principal_components_deaths
, columns = ['PC1', 'PC2'])
plt.figure(1, figsize = (8,8))
plt.ylim(-10, 20)
plt.xlim(-40, 300)
#fig = plt.figure(figsize = (8,8))
plt.xlabel('PC1', fontsize = 15)
plt.ylabel('PC2', fontsize = 15)
plt.title('2 component PCA', fontsize = 20)
df_pca_all = [df_principal_components_confirmed, df_principal_components_recovered, df_principal_components_deaths]
confirmed_size = df_confirmed.iloc[:, 4+day+1]
recovered_size = df_recovered.iloc[:, 4+day+1]
deaths_size = df_deaths.iloc[:, 4+day+1]
sizes_all = [confirmed_size, recovered_size, deaths_size]
targets = ['confirmed', 'recovered', 'deaths']
colors = ['r', 'g', 'b']
idx_country = df_confirmed[df_confirmed['Country/Region']==country].index.tolist()[0]
if compare_with == 'all':
for target, color, df_pca, sizes, add in zip(targets,colors, df_pca_all, sizes_all, [0, 20, 40]):
plt.scatter(df_pca.loc[:, 'PC1'] + add,
df_pca.loc[:, 'PC2'],
c = color,
s = 30 if constant_size else sizes/2 )
plt.annotate(country, (df_pca.loc[idx_country, 'PC1']+.03 + add, df_pca.loc[idx_country, 'PC2']+.03))
else:
idx_compare_with = df_confirmed[df_confirmed['Country/Region']==compare_with].index.tolist()[0]
for target, color, df_pca, sizes, add in zip(targets,colors, df_pca_all, sizes_all, [0, 20, 40]):
plt.scatter(df_pca.loc[[idx_country, idx_compare_with], 'PC1'] + add,
df_pca.loc[[idx_country, idx_compare_with], 'PC2'],
c = color,
s = 30 if constant_size else sizes/2 )
plt.annotate(country, (df_pca.loc[idx_country, 'PC1']+.03 + add, df_pca.loc[idx_country, 'PC2']+.03))
plt.annotate(compare_with, (df_pca.loc[idx_compare_with, 'PC1']+.03 + add, df_pca.loc[idx_compare_with, 'PC2']+.03))
lgnd = plt.legend(targets)
lgnd.legendHandles[0]._sizes = [10]
lgnd.legendHandles[1]._sizes = [10]
lgnd.legendHandles[2]._sizes = [10]
title = 'day:' + str(day)
plt.title(title)
plt.grid()
plt.show()
%matplotlib inline
interactive_plot = interactive(pca_move,
day =(1, 51), # day 0 = 1/22/20 ---> day 51 = (3/14/20)
country =['Morocco','China', 'Spain', 'Italy'],
compare_with =['Morocco','China', 'Spain', 'Italy', 'all'],
constant_size = [True, False])
interactive_plot
###Output
_____no_output_____ |
_as/2019/jp/00.ipynb | ###Markdown
[Applied Statistics](https://lamastex.github.io/scalable-data-science/as/2019/) 1MS926, Spring 2019, Uppsala University ©2019 Raazesh Sainudiin. [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) 00. Introduction 1. Introduction2. What is SageMath and why are we using it?* Interaction - learning/teaching style* What can you expect to get out of this course? IntroductionSee [Applied Statistics](https://lamastex.github.io/scalable-data-science/as/2019/) for learning outcomes, content, assessment, etc.**Assessment:**- Three Computer Lab Assessments (attendance will be taken in the Labs)- Final Exam will be in Computer Lab What is SageMath and why are we using it?We will be using Sage or [SageMath](http://www.sagemath.org/) for our *hands-on* work in this course. Sage is a free open-source mathematics software system licensed under the GPL. Sage can be used to study mathematics and statistics, including algebra, calculus, elementary to very advanced number theory, cryptography, commutative algebra, group theory, combinatorics, graph theory, exact linear algebra, optimization, interactive data visualization, randomized or Monte Carlo algorithms, scientific and statistical computing and much more. It combines various software packages into an integrative learning, teaching and research experience that is well suited for novice as well as professional researchers. Sage is a set of software libraries built on top of [Python](http://www.python.org/), a widely used general purpose programming language. Sage greatly enhances Python's already mathematically friendly nature. It is one of the languages used at Google, US National Aeronautics and Space Administration (NASA), US Jet Propulsion Laboratory (JPL), Industrial Light and Magic, YouTube, and other leading entities in industry and public sectors. Scientists, engineers, and mathematicians often find it well suited for their work. Obtain a more thorough rationale for Sage from Why Sage? and Success Stories, Testimonials and News Articles. Jump start your motivation by taking a Sage Feature Tour right now! Interaction - learning/teaching styleThis is an interactive jupyter notebook with SageMath interpreter and interactive means... VideosWe will embed relevant videos in the notebook, such as those from [The Khan Academy](http://www.khanacademy.org/) or open MOOCs from google, facebook, academia, etc.* [watch Google's Hal Varian's 'statistics is the dream' job speech](https://www.youtube.com/embed/D4FQsYTbLoI)* [watch UC Berkeley Professor Michael Jordan's speech on 'The Data Science Revolution'](https://youtu.be/ggq7HiDO0OU) LatexWe will *formally present mathematical and statistical concepts* in the Notebook using Latex as follows:$$ \sum_{i=1}^5 i = 1+2+3+4+5=15, \qquad \prod_{i=3}^6 i = 3 \times 4 \times 5 \times 6 = 360 $$$$ \binom{n}{k}:= \frac{n!}{k!(n-k)!}, \qquad \lim_{x \to \infty}\exp{(-x)} = 0 $$$$ \{\alpha, \beta, \gamma, \delta, \epsilon, \zeta, \mu,\theta, \vartheta, \phi, \varphi, \omega, \sigma, \varsigma,\Gamma, \Delta, \Theta, \Phi, \Omega\}, \qquad \forall x \in X, \quad \exists y \leq \epsilon, \ldots $$ Interactive VisualizationsWe will use interactive visualisations to convey concepts when possible. See the Taylor approximation below for a given order.
###Code
var('x')
x0 = 0
f = sin(x)*e^(-x)
p = plot(f,-1,5, thickness=2)
dot = point((x0,f(x=x0)),pointsize=80,rgbcolor=(1,0,0))
@interact
def _(order=[1..12]):
ft = f.taylor(x,x0,order)
pt = plot(ft,-1, 5, color='green', thickness=2)
pretty_print(html('$f(x)\;=\;%s$'%latex(f)))
pretty_print(html('$\hat{f}(x;%s)\;=\;%s+\mathcal{O}\
(x^{%s})$'%(x0,latex(ft),order+1)))
show(dot + p + pt, ymin = -.5, ymax = 1, figsize=[6,3])
###Output
_____no_output_____
###Markdown
Lab-Lecture Style of Teaching-LearningWe will *write computer programs* within code cells in the Notebook right after we learn the mathematical and statistical concepts. Thus, there is a significant overlap between traditional lectures and labs in this course -- in fact these interactions are *lab-lectures*. Live Data Explorations and ModelingLet us visualize the CO2 data, fetched from US NOAA, and do a simple linear regression.
###Code
# Author: Marshall Hampton
import urllib2 as U
import scipy.stats as Stat
from IPython.display import HTML
co2data = U.urlopen(\
'ftp://ftp.cmdl.noaa.gov/ccg/co2/trends/co2_mm_mlo.txt'\
).readlines()
datalines = []
for a_line in co2data:
if a_line.find('Creation:') != -1:
cdate = a_line
if a_line[0] != '#':
temp = a_line.replace('\n','').split(' ')
temp = [float(q) for q in temp if q != '']
datalines.append(temp)
trdf = RealField(16)
@interact
def mauna_loa_co2(start_date = slider(1958,2018,1,1958), \
end_date = slider(1958, 2018,1,2018)):
htmls1 = '<h3>CO2 monthly averages at Mauna Loa (interpolated),\
from NOAA/ESRL data</h3>'
htmls2 = '<h4>'+cdate+'</h4>'
sel_data = [[q[2],q[4]] for q in datalines if start_date < \
q[2] < end_date]
c_max = max([q[1] for q in sel_data])
c_min = min([q[1] for q in sel_data])
slope, intercept, r, ttprob, stderr = Stat.linregress(sel_data)
pretty_print(html(htmls1+htmls2+'<h4>Linear regression slope: '\
+ str(trdf(slope))+ \
' ppm/year; correlation coefficient: ' +\
str(trdf(r)) + '</h4>'))
var('x,y')
show(list_plot(sel_data, plotjoined=True, rgbcolor=(1,0,0))
+ plot(slope*x+intercept,start_date,end_date),
xmin = start_date, ymin = c_min-2, axes = True, \
xmax = end_date, ymax = c_max+3, \
frame = False, figsize=[8,3])
###Output
_____no_output_____
###Markdown
We will use publicly available resources generously!Here is an image of number systems from Wikipedia.We will also sometimes embed whole wikipedia pages. Expect cached wikipedia pages in your final exam. The course will prepare you to think from facts in publicly available information.
###Code
def showURL(url, ht=500):
"""Return an IFrame of the url to show in notebook \
with height ht"""
from IPython.display import IFrame
return IFrame(url, width='95%', height=ht)
showURL('https://en.wikipedia.org/wiki/Number',400)
###Output
_____no_output_____
###Markdown
What can you expect to get out of this course?Strengthen your foundations in:- probability and statistics,- computing and data analysisin order to understand the probabilistic models and statistical inference procedures as well as implement computer programs for processing raw data - a crucial distinguishing skillset of a *modern applied statistician*, i.e., a *data scientist* who knows her/his probabilistic and statistical foundations. What is Data Science?We will steer clear of academic/philosophical discussions on "what is data science?" and focus instead on the core skillset in mathematics, statistics and computing that is expected in a typical data science job today.
###Code
showURL("https://en.wikipedia.org/wiki/Data_science")
###Output
_____no_output_____
###Markdown
Course StructureThere will be 12 "lab-lectures" where I will be using `.ipynb` or IPython notebooks like this and 3 dedicated computer laboratories.We will start with basics of programming in BASH and a review of Python before recollecting concepts in probability and setting the stage for applied statistics, including hypothesis testing and parameter estimation.**Connection to Statistical Machine Learning and AI:** An immediate application of parameter estimation in *statistical machine learning (SML)* and *artificial intelligence (AI)*, where you apply more sophisticated methods than what you will learn here, is *prediction*. This course will prepare you for SML and AI.**STS also stands for Society**As STS students, you have the natural responsibility to ask questions such as: *what is the cost of such sophisticated prediction algorithms on our society and planet?*Here your first assigned reading is from the following work:- [https://anatomyof.ai/](https://anatomyof.ai/), "Anatomy of an AI System" By Kate Crawford and Vladan Joler (2018)The Amazon Echo as an anatomical map of human labor, data and planetary resources. Download the detailed [ai-anatomy-map.pdf](https://anatomyof.ai/img/ai-anatomy-map.pdf). ------ Assignment 1, Problem 0Maximum Points = 3 Given that you are in the civil engineering programme in *systems in technology and society*, spend some time reading the following:- [https://anatomyof.ai/](https://anatomyof.ai/), "Anatomy of an AI System" By Kate Crawford and Vladan Joler (2018)Answer whether each of the following statements is `True` or `False` *according to the authors* by appropriately replacing `Xxxxx` corresponding to `TruthValueOfStatement0a`, `TruthValueOfStatement0b` and `TruthValueOfStatement0c`, respectively, in the next cell to demonstrate your reading comprehension.1. `Statement0a =` *Each small moment of convenience (provided by Amazon's Echo) – be it answering a question, turning on a light, or playing a song – requires a vast planetary network, fueled by the extraction of non-renewable materials, labor, and data.*- `Statement0b =` *The Echo user is simultaneously a consumer, a resource, a worker, and a product* - `Statement0c =` *Many of the assumptions about human life made by machine learning systems are narrow, normative and laden with error. Yet they are inscribing and building those assumptions into a new world, and will increasingly play a role in how opportunities, wealth, and knowledge are distributed.*
###Code
# Replace Xxxxx with True or False; Don't modify anything else in this cell!
TruthValueOfStatement0a = Xxxxx
TruthValueOfStatement0b = Xxxxx
TruthValueOfStatement0c = Xxxxx
# ASSIGNMENT 1, Test 0, POINTS 3
# Test locally to ensure an acceptable answer, True or False
assert(isinstance(TruthValueOfStatement0a, bool))
assert(isinstance(TruthValueOfStatement0b, bool))
assert(isinstance(TruthValueOfStatement0c, bool))
###Output
_____no_output_____
###Markdown
[Applied Statistics](https://lamastex.github.io/scalable-data-science/as/2019/) 1MS926, Spring 2019, Uppsala University ©2019 Raazesh Sainudiin. [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) 00. Introduction 1. Introduction2. What is SageMath and why are we using it?* Interaction - learning/teaching style* What can you expect to get out of this course? IntroductionSee [Applied Statistics](https://lamastex.github.io/scalable-data-science/as/2019/) for learning outcomes, content, assessment, etc.**Assessment:**- Three Computer Lab Assessments (attendance will be taken in the Labs)- Final Exam will be in Computer Lab What is SageMath and why are we using it?We will be using Sage or [SageMath](http://www.sagemath.org/) for our *hands-on* work in this course. Sage is a free open-source mathematics software system licensed under the GPL. Sage can be used to study mathematics and statistics, including algebra, calculus, elementary to very advanced number theory, cryptography, commutative algebra, group theory, combinatorics, graph theory, exact linear algebra, optimization, interactive data visualization, randomized or Monte Carlo algorithms, scientific and statistical computing and much more. It combines various software packages into an integrative learning, teaching and research experience that is well suited for novice as well as professional researchers. Sage is a set of software libraries built on top of [Python](http://www.python.org/), a widely used general purpose programming language. Sage greatly enhances Python's already mathematically friendly nature. It is one of the languages used at Google, US National Aeronautics and Space Administration (NASA), US Jet Propulsion Laboratory (JPL), Industrial Light and Magic, YouTube, and other leading entities in industry and public sectors. Scientists, engineers, and mathematicians often find it well suited for their work. Obtain a more thorough rationale for Sage from Why Sage? and Success Stories, Testimonials and News Articles. Jump start your motivation by taking a Sage Feature Tour right now! Interaction - learning/teaching styleThis is an interactive jupyter notebook with SageMath interpreter and interactive means... VideosWe will embed relevant videos in the notebook, such as those from [The Khan Academy](http://www.khanacademy.org/) or open MOOCs from google, facebook, academia, etc.* [watch Google's Hal Varian's 'statistics is the dream' job speech](https://www.youtube.com/embed/D4FQsYTbLoI)* [watch UC Berkeley Professor Michael Jordan's speech on 'The Data Science Revolution'](https://youtu.be/ggq7HiDO0OU) LatexWe will *formally present mathematical and statistical concepts* in the Notebook using Latex as follows:$$ \sum_{i=1}^5 i = 1+2+3+4+5=15, \qquad \prod_{i=3}^6 i = 3 \times 4 \times 5 \times 6 = 360 $$$$ \binom{n}{k}:= \frac{n!}{k!(n-k)!}, \qquad \lim_{x \to \infty}\exp{(-x)} = 0 $$$$ \{\alpha, \beta, \gamma, \delta, \epsilon, \zeta, \mu,\theta, \vartheta, \phi, \varphi, \omega, \sigma, \varsigma,\Gamma, \Delta, \Theta, \Phi, \Omega\}, \qquad \forall x \in X, \quad \exists y \leq \epsilon, \ldots $$ Interactive VisualizationsWe will use interactive visualisations to convey concepts when possible. See the Taylor approximation below for a given order.
###Code
var('x')
x0 = 0
f = sin(x)*e^(-x)
p = plot(f,-1,5, thickness=2)
dot = point((x0,f(x=x0)),pointsize=80,rgbcolor=(1,0,0))
@interact
def _(order=[1..12]):
ft = f.taylor(x,x0,order)
pt = plot(ft,-1, 5, color='green', thickness=2)
pretty_print(html('$f(x)\;=\;%s$'%latex(f)))
pretty_print(html('$\hat{f}(x;%s)\;=\;%s+\mathcal{O}\
(x^{%s})$'%(x0,latex(ft),order+1)))
show(dot + p + pt, ymin = -.5, ymax = 1, figsize=[6,3])
###Output
_____no_output_____
###Markdown
Lab-Lecture Style of Teaching-LearningWe will *write computer programs* within code cells in the Notebook right after we learn the mathematical and statistical concepts. Thus, there is a significant overlap between traditional lectures and labs in this course -- in fact these interactions are *lab-lectures*. Live Data Explorations and ModelingLet us visualize the CO2 data, fetched from US NOAA, and do a simple linear regression.
###Code
# Author: Marshall Hampton
import urllib2 as U
import scipy.stats as Stat
from IPython.display import HTML
co2data = U.urlopen(\
'ftp://ftp.cmdl.noaa.gov/ccg/co2/trends/co2_mm_mlo.txt'\
).readlines()
datalines = []
for a_line in co2data:
if a_line.find('Creation:') != -1:
cdate = a_line
if a_line[0] != '#':
temp = a_line.replace('\n','').split(' ')
temp = [float(q) for q in temp if q != '']
datalines.append(temp)
trdf = RealField(16)
@interact
def mauna_loa_co2(start_date = slider(1958,2018,1,1958), \
end_date = slider(1958, 2018,1,2018)):
htmls1 = '<h3>CO2 monthly averages at Mauna Loa (interpolated),\
from NOAA/ESRL data</h3>'
htmls2 = '<h4>'+cdate+'</h4>'
sel_data = [[q[2],q[4]] for q in datalines if start_date < \
q[2] < end_date]
c_max = max([q[1] for q in sel_data])
c_min = min([q[1] for q in sel_data])
slope, intercept, r, ttprob, stderr = Stat.linregress(sel_data)
pretty_print(html(htmls1+htmls2+'<h4>Linear regression slope: '\
+ str(trdf(slope))+ \
' ppm/year; correlation coefficient: ' +\
str(trdf(r)) + '</h4>'))
var('x,y')
show(list_plot(sel_data, plotjoined=True, rgbcolor=(1,0,0))
+ plot(slope*x+intercept,start_date,end_date),
xmin = start_date, ymin = c_min-2, axes = True, \
xmax = end_date, ymax = c_max+3, \
frame = False, figsize=[8,3])
###Output
_____no_output_____
###Markdown
We will use publicly available resources generously!Here is an image of number systems from Wikipedia.We will also sometimes embed whole wikipedia pages. Expect cached wikipedia pages in your final exam. The course will prepare you to think from facts in publicly available information.
###Code
def showURL(url, ht=500):
"""Return an IFrame of the url to show in notebook \
with height ht"""
from IPython.display import IFrame
return IFrame(url, width='95%', height=ht)
showURL('https://en.wikipedia.org/wiki/Number',400)
###Output
_____no_output_____
###Markdown
What can you expect to get out of this course?Strengthen your foundations in:- probability and statistics,- computing and data analysisin order to understand the probabilistic models and statistical inference procedures as well as implement computer programs for processing raw data - a crucial distinguishing skillset of a *modern applied statistician*, i.e., a *data scientist* who knows her/his probabilistic and statistical foundations. What is Data Science?We will steer clear of academic/philosophical discussions on "what is data science?" and focus instead on the core skillset in mathematics, statistics and computing that is expected in a typical data science job today.
###Code
showURL("https://en.wikipedia.org/wiki/Data_science")
###Output
_____no_output_____
###Markdown
Course StructureThere will be 12 "lab-lectures" where I will be using `.ipynb` or IPython notebooks like this and 3 dedicated computer laboratories.We will start with basics of programming in BASH and a review of Python before recollecting concepts in probability and setting the stage for applied statistics, including hypothesis testing and parameter estimation.**Connection to Statistical Machine Learning and AI:** An immediate application of parameter estimation in *statistical machine learning (SML)* and *artificial intelligence (AI)*, where you apply more sophisticated methods than what you will learn here, is *prediction*. This course will prepare you for SML and AI.**STS also stands for Society**As STS students, you have the natural responsibility to ask questions such as: *what is the cost of such sophisticated prediction algorithms on our society and planet?*Here your first assigned reading is from the following work:- [https://anatomyof.ai/](https://anatomyof.ai/), "Anatomy of an AI System" By Kate Crawford and Vladan Joler (2018)The Amazon Echo as an anatomical map of human labor, data and planetary resources. Download the detailed [ai-anatomy-map.pdf](https://anatomyof.ai/img/ai-anatomy-map.pdf). --- Assignment 1, PROBLEM 0Maximum Points = 3 Given that you are in the civil engineering programme in *systems in technology and society*, spend some time reading the following:- [https://anatomyof.ai/](https://anatomyof.ai/), "Anatomy of an AI System" By Kate Crawford and Vladan Joler (2018)Answer whether each of the following statements is `True` or `False` *according to the authors* by appropriately replacing `Xxxxx` corresponding to `TruthValueOfStatement0a`, `TruthValueOfStatement0b` and `TruthValueOfStatement0c`, respectively, in the next cell to demonstrate your reading comprehension.1. `Statement0a =` *Each small moment of convenience (provided by Amazon's Echo) – be it answering a question, turning on a light, or playing a song – requires a vast planetary network, fueled by the extraction of non-renewable materials, labor, and data.*- `Statement0b =` *The Echo user is simultaneously a consumer, a resource, a worker, and a product* - `Statement0c =` *Many of the assumptions about human life made by machine learning systems are narrow, normative and laden with error. Yet they are inscribing and building those assumptions into a new world, and will increasingly play a role in how opportunities, wealth, and knowledge are distributed.*
###Code
# Replace Xxxxx with True or False; Don't modify anything else in this cell!
TruthValueOfStatement0a = Xxxxx
TruthValueOfStatement0b = Xxxxx
TruthValueOfStatement0c = Xxxxx
###Output
_____no_output_____
###Markdown
--- Local Test for Assignment 1, PROBLEM 0Evaluate cell below to make sure your answer is valid. You **should not** modify anything in the cell below when evaluating it to do a local test of your solution.
###Code
# Test locally to ensure an acceptable answer, True or False
try:
assert(isinstance(TruthValueOfStatement0a, bool))
assert(isinstance(TruthValueOfStatement0b, bool))
assert(isinstance(TruthValueOfStatement0c, bool))
print("Good, you have answered either True or False. Hopefully they are the correct answers!")
except AssertionError:
print("Try again. You are not writing True or False for your answers.")
###Output
Good, you have answered either True or False. Hopefully they are the correct answers!
|
qiskit_version/02_Measurements_and_Mixed_States.ipynb | ###Markdown
A measurement is a central concept in quantum mechanics. An easy way to think about it is as a sample from a probability distribution: it is a random variable with a number of outcomes, each outcome is produced with a certain probability. Measurements connect the quantum world to our classical one: we cannot directly observe the quantum state in nature, we can only gather statistics about it with measurements. It sounds like a harsh boundary between a quantum and a classical system that can only be bridged by measurement. The reality is more subtle: unless a quantum system is perfectly isolated, it interacts with its surrounding environment. This leads to the introduction of mixed states, which in one limit recover classical probabilities. More on the bra-ket notationBefore we take a deep dive into what measurements are, we need to introduce one more notation to complement the ket: it is called a bra and it is denoted by $\langle\psi|$ for some quantum state $|\psi\rangle$. Together they form the bra-ket or Dirac notation. A bra is the conjugate transpose of a ket, and the other way around. This also means that a bra is a row vector. For instance, this is the bra for $|0\rangle$
###Code
import numpy as np
zero_ket = np.array([[1], [0]])
print("|0> ket:\n", zero_ket)
print("<0| bra:\n", zero_ket.T.conj())
###Output
|0> ket:
[[1]
[0]]
<0| bra:
[[1 0]]
###Markdown
This makes it very easy to write dot products: if we write a bra followed by a ket, that is exactly what the dot product is. This is so common that we often drop one of the vertical bars, and just write $\langle 0|0\rangle$, for instance. Since quantum states are normalized, the inner product of any quantum state with itself is always one:
###Code
zero_ket.T.conj() @ zero_ket
###Output
_____no_output_____
###Markdown
Similarly, orthogonal vectors always give 0. E.g. $\langle 0|1\rangle$:
###Code
one_ket = np.array([[0], [1]])
zero_ket.T.conj() @ one_ket
###Output
_____no_output_____
###Markdown
What about a ket and a bra? That is going to be a matrix: essentially the outer product of the two vectors. Here's $|0\rangle\langle 0|$:
###Code
zero_ket @ zero_ket.T.conj()
###Output
_____no_output_____
###Markdown
This should look familiar: it is a projection to the first element of the canonical basis. It is true in general that $|\psi\rangle\langle\psi|$ is going to be a projector to $|\psi\rangle$. It is very intuitive: take some other quantum state $|\phi\rangle$ and apply the matrix $|\psi\rangle\langle\psi|$ on it: $|\psi\rangle\langle\psi|\phi\rangle$. Now the right-most two terms are a bra and a ket, so it is a dot product: the overlap between $|\phi\rangle$ and $|\psi\rangle$. Since this is a scalar, it just scales the left-most term, which is the ket $|\psi\rangle$, so in effect, we projected $|\phi \rangle$ on this vector. MeasurementsA measurement in quantum mechanics is an operator-valued random variable. The theory of measurements is rich and countless questions about them are still waiting to be answered. Most quantum computers that we have today, however, only implement one very specific measurement, which makes our discussion a lot simpler. This measurement is in the canonical basis. In other words, the measurement contains two projections, $|0\rangle\langle 0|$ and $|1\rangle\langle 1|$, and this measurement can be applied to any of the qubits of the quantum computer.We already saw how applying a projection on a vector works. If we want to make a scalar value of that, we need to add a bra to the left. For instance, for some state $|\psi\rangle$, we get a scalar for $\langle\psi|0\rangle\langle 0|\psi\rangle$. This is called the expectation value of the operator $|0\rangle\langle 0|$. To put this in context, let us apply the projection $|0\rangle\langle 0|$ on the superposition $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$, which is the column vector $\frac{1}{\sqrt{2}}\begin{bmatrix} 1\\ 1\end{bmatrix}$.
###Code
ψ = np.array([[1], [1]])/np.sqrt(2)
Π_0 = zero_ket @ zero_ket.T.conj()
ψ.T.conj() @ Π_0 @ ψ
###Output
_____no_output_____
###Markdown
That is exactly one half, the square of the absolute value of the probability amplitude corresponding to $|0\rangle$ in the superposition! This is the mathematical formalism of what we had said earlier: given a state $|\psi\rangle = a_0|0\rangle + a_1|1\rangle$, we get an output $i$ with probability $|a_i|^2$. This is known as the *Born rule*. Now we have a recipe to extract probabilities with projections. This is exactly what is implemented in the quantum simulator. The measurement in the simulator is what we described here. Let's create an equal superposition with the Hadamard gate (see a later notebook for quantum circuits), apply the measurement, and observe the statistics:
###Code
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
from qiskit import Aer
from qiskit.tools.visualization import plot_histogram
backend = Aer.get_backend('qasm_simulator')
q = QuantumRegister(1)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
You see that the outcome is random, with roughly half of the outcomes being 0.There is something additional happening. The measurement has a random outcome, but once it is performed, the quantum state is in the corresponding basis vector. That is, the superposition is destroyed. This is referred to as the collapse of the wavefunction. How and why it happens is the subject of many ongoing debates and research efforts, but what matters to us is that we can easily calculate the quantum state after the measurement. Just projecting it to the basis vector is insufficient, since that would not be normalized, so we have to renormalize it. Mathematically it is expressed by the somewhat convoluted expression $\frac{|i\rangle\langle i|\psi\rangle}{\sqrt{\langle\psi|i\rangle\langle i|\psi\rangle}}$ if we observe the output $i$. For instance, if we observe zero after measuring the superposition $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$, the state after the measurement will be
###Code
ψ = np.array([[np.sqrt(2)/2], [np.sqrt(2)/2]])
Π_0 = zero_ket @ zero_ket.T.conj()
probability_0 = ψ.T.conj() @ Π_0 @ ψ
Π_0 @ ψ/np.sqrt(probability_0)
###Output
_____no_output_____
###Markdown
which is just a very long way of saying we get $|0\rangle$.You can easily see this by putting two measurements in a sequence on the same qubit. The second one will always give the same outcome as the first. The first one is random, but the second one will be determined, since there will be no superposition in the computational basis after the first measurement. Let's simulate this by writing out the results of the two measurements into two different classical registers:
###Code
backend = Aer.get_backend('qasm_simulator')
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q[0], c[0])
circuit.measure(q[0], c[1])
job = execute(circuit, backend, shots=100)
job.result().get_counts(circuit)
###Output
_____no_output_____
###Markdown
There is no output like 01 or 10. Measuring multiqubit systemsMost quantum computers implement local measurements, which means that each qubit is measured separately. So if we have a two qubit system where the first qubit is in the equal superposition and the second one is in $|0\rangle$, that is, we have the state $\frac{1}{\sqrt{2}}(|00\rangle + |01\rangle)$, we will observe 0 and 0 as outcomes of the measurements on the two qubits, or 0 and 1.
###Code
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
What happens if we make measurements on an entangled state? Let's look at the statistics again on the $|\phi^+\rangle$ state:
###Code
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.cx(q[0], q[1])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
We only observe 00 and 11. Since the state is $\frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)$, this should not come as a shock. Yet, there is something remarkable going on here. At the end of the last section, we saw the same statistics, but from measurements on the same qubit. Now we have two, spatially separate qubits exhibiting the same behaviour: this is a very strong form of correlation. This means that if we measure just one qubit, and get, say, 0 as the outcome, we *know* with certainty that if we measured the other qubit, we would also get 0, even though the second measurement is also a random variable.To appreciate this better, imagine that you are tossing two unbiased coins. If you observe heads on one, there is absolutely nothing that you can say about what the other one might be other than a wild guess that holds with probability 0.5. If you play foul and bias the coins, you might improve your guessing accuracy. Yet you can never say with certainty what the other coin will be based on the outcome you observed on one coin, except for the trivial case when the other coin deterministically gives the same face always.Remarkable as it is, there is no action at a distance or instantaneous (faster than the speed of light) signalling happening between the qubits, though. Your measurement was local to the qubit and so is your information. If there is somebody else doing the measurement on the other qubit, you would have to inform the person through classical communication channels that you happen to know what the outcome will be. So while we certainly cannot violate the theory of relativity with entanglement, this strong form of correlation is still central to many quantum algorithms. Mixed statesIf all was clear until now, this is where it gets messy. A ket followed by a bra is a projection, as we explained above. More than that, it is also a density matrix. A density matrix is another way of writing a quantum state, instead of kets. So, for instance we could write $\rho = |\psi\rangle\langle\psi|$, where $\rho$ is the density matrix for $|\psi\rangle$. The Born rule still applies, but now we have to take the trace of the result: for instance, $\mathrm{Tr}[|0\rangle\langle 0|\rho]$ would be the probability of seeing 0. See it in action:
###Code
ψ = np.array([[1], [1]])/np.sqrt(2)
ρ = ψ @ ψ.T.conj()
Π_0 = zero_ket @ zero_ket.T.conj()
np.trace(Π_0 @ ρ)
###Output
_____no_output_____
###Markdown
We get one half again. The renormalization after a measurement happens in a similar way: $\frac{|0\rangle\langle 0|\rho|0\rangle\langle 0|}{\mathrm{Tr}[|0\rangle\langle 0|\rho]}$.
###Code
probability_0 = np.trace(Π_0 @ ρ)
Π_0 @ ρ @ Π_0/probability_0
###Output
_____no_output_____
###Markdown
So why do we need this at all? Every state we have mentioned so far is called a *pure state*: these are kets or a density matrix created as a ket and a bra. There are other states called *mixed states*: these are *classical* probability distributions over pure states. Formally, a mixed state is written as $\sum_i p_i |\psi_i\rangle\langle\psi_i|$, where $\sum_i p_i=1$, $p_i\geq 0$. This reflects our classical ignorance over the underlying quantum states. Compare the density matrix of the equal superposition $\frac{1}{\sqrt{2}}(|0\rangle+|1\rangle)$ and the mixed state $0.5(|0\rangle\langle 0|+|1\rangle\langle 1|)$:
###Code
zero_ket = np.array([[1], [0]])
one_ket = np.array([[0], [1]])
ψ = (zero_ket + one_ket)/np.sqrt(2)
print("Density matrix of the equal superposition")
print(ψ @ ψ.T.conj())
print("Density matrix of the equally mixed state of |0><0| and |1><1|")
print((zero_ket @ zero_ket.T.conj()+one_ket @ one_ket.T.conj())/2)
###Output
Density matrix of the equal superposition
[[0.5 0.5]
[0.5 0.5]]
Density matrix of the equally mixed state of |0><0| and |1><1|
[[0.5 0. ]
[0. 0.5]]
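###Markdown
The two density matrices agree on the diagonal, so no measurement in the computational basis can tell them apart. Measuring in a rotated basis can. Here is a minimal sketch, reusing the numpy objects defined above (Π_0, ψ, zero_ket, one_ket):
###Code
# Sketch: apply a Hadamard rotation before projecting onto |0><0|.
# The pure superposition rotates to |0>, so P(0)=1; the mixed state is
# invariant under the rotation, so P(0) stays 0.5.
H = np.array([[1, 1], [1, -1]])/np.sqrt(2)
ρ_pure = ψ @ ψ.T.conj()
ρ_mixed = (zero_ket @ zero_ket.T.conj() + one_ket @ one_ket.T.conj())/2
for name, ρ in (("pure", ρ_pure), ("mixed", ρ_mixed)):
    print(name, "P(0) after H:", np.trace(Π_0 @ H @ ρ @ H.T.conj()).real)
###Output
_____no_output_____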
###Markdown
A measurement is a central concept in quantum mechanics. An easy way to think about it is as a sample from a probability distribution: it is a random variable with a number of outcomes, each outcome is produced with a certain probability. Measurements connect the quantum world to our classical one: we cannot directly observe the quantum state in nature, we can only gather statistics about it with measurements. It sounds like a harsh boundary between a quantum and a classical system that can only be bridged by measurement. The reality is more subtle: unless a quantum system is perfectly isolated, it interacts with its surrounding environment. This leads to the introduction of mixed states, which in one limit recover classical probabilities. More on the bra-ket notationBefore we take a deep dive into what measurements are, we need to introduce one more notation to complement the ket: it is called a bra and it is denoted by $\langle\psi|$ for some quantum state $|\psi\rangle$. Together they form the bra-ket or Dirac notation. A bra is the conjugate transpose of a ket, and the other way around. This also means that a bra is a row vector. For instance, this is the bra for $|0\rangle$
###Code
import numpy as np
zero_ket = np.array([[1], [0]])
print("|0> ket:\n", zero_ket)
print("<0| bra:\n", zero_ket.T.conj())
###Output
_____no_output_____
###Markdown
This makes it very easy to write dot products: if we write a bra followed by a ket, that is exactly what the dot product is. This is so common that we often drop one of the vertical bars, and just write $\langle 0|0\rangle$, for instance. Since quantum states are normalized, the inner product of any quantum state with itself is always one:
###Code
zero_ket.T.conj().dot(zero_ket)
###Output
_____no_output_____
###Markdown
Similarly, orthogonal vectors always give 0. E.g. $\langle 0|1\rangle$:
###Code
one_ket = np.array([[0], [1]])
zero_ket.T.conj().dot(one_ket)
###Output
_____no_output_____
###Markdown
What about a ket and a bra? That is going to be a matrix: essentially the outer product of the two vectors. Here's $|0\rangle\langle 0|$:
###Code
zero_ket.dot(zero_ket.T.conj())
###Output
_____no_output_____
###Markdown
This should look familiar: it is a projection to the first element of the canonical basis. It is true in general that $|\psi\rangle\langle\psi|$ is going to be a projector to $|\psi\rangle$. It is very intuitive: take some other quantum state $|\phi\rangle$ and apply the matrix $|\psi\rangle\langle\psi|$ on it: $|\psi\rangle\langle\psi|\phi\rangle$. Now the right-most two terms are a bra and a ket, so it is a dot product: the overlap between $|\phi\rangle$ and $|\psi\rangle$. Since this is a scalar, it just scales the left-most term, which is the ket $|\psi\rangle$, so in effect, we projected $|\phi \rangle$ on this vector. MeasurementsA measurement in quantum mechanics is an operator-valued random variable. The theory of measurements is rich and countless questions about them are still waiting to be answered. Most quantum computers that we have today, however, only implement one very specific measurement, which makes our discussion a lot simpler. This measurement is in the canonical basis. In other words, the measurement contains two projections, $|0\rangle\langle 0|$ and $|1\rangle\langle 1|$, and this measurement can be applied to any of the qubits of the quantum computer.We already saw how applying a projection on a vector works. If we want to make a scalar value of that, we need to add a bra to the left. For instance, for some state $|\psi\rangle$, we get a scalar for $\langle\psi|0\rangle\langle 0|\psi\rangle$. This is called the expectation value of the operator $|0\rangle\langle 0|$. To put this in context, let us apply the projection $|0\rangle\langle 0|$ on the superposition $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$, which is the column vector $\frac{1}{\sqrt{2}}\begin{bmatrix} 1\\ 1\end{bmatrix}$.
###Code
ψ = np.array([[1], [1]])/np.sqrt(2)
Π_0 = zero_ket.dot(zero_ket.T.conj())
ψ.T.conj().dot(Π_0.dot(ψ))
###Output
_____no_output_____
###Markdown
That is exactly one half, the square of the absolute value of the probability amplitude corresponding to $|0\rangle$ in the superposition! This is the mathematical formalism of what we had said earlier: given a state $|\psi\rangle = a_0|0\rangle + a_1|1\rangle$, we get an output $i$ with probability $|a_i|^2$. This is known as the *Born rule*. Now we have a recipe to extract probabilities with projections. This is exactly what is implemented in the quantum simulator. The measurement in the simulator is what we described here. Let's create an equal superposition with the Hadamard gate (see a later notebook for quantum circuits), apply the measurement, and observe the statistics:
###Code
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
from qiskit import BasicAer
from qiskit.tools.visualization import plot_histogram
backend = BasicAer.get_backend('qasm_simulator')
q = QuantumRegister(1)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
You see that the outcome is random, with roughly half of the outcomes being 0.There is something additional happening. The measurement has a random outcome, but once it is performed, the quantum state is in the corresponding basis vector. That is, the superposition is destroyed. This is referred to as the collapse of the wavefunction. How and why it happens is the subject of many ongoing debates and research efforts, but what matters to us is that we can easily calculate the quantum state after the measurement. Just projecting it to the basis vector is insufficient, since that would not be normalized, so we have to renormalize it. Mathematically it is expressed by the somewhat convoluted expression $\frac{|i\rangle\langle i|\psi\rangle}{\sqrt{\langle\psi|i\rangle\langle i|\psi\rangle}}$ if we observe the output $i$. For instance, if we observe zero after measuring the superposition $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$, the state after the measurement will be
###Code
ψ = np.array([[np.sqrt(2)/2], [np.sqrt(2)/2]])
Π_0 = zero_ket.dot(zero_ket.T.conj())
probability_0 = ψ.T.conj().dot(Π_0.dot(ψ))
Π_0.dot(ψ)/np.sqrt(probability_0)
###Output
_____no_output_____
###Markdown
which is just a very long way of saying we get $|0\rangle$.You can easily see this by putting two measurements in a sequence on the same qubit. The second one will always give the same outcome as the first. The first one is random, but the second one will be determined, since there will be no superposition in the computational basis after the first measurement. Let's simulate this by writing out the results of the two measurements into two different classical registers:
###Code
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q[0], c[0])
circuit.measure(q[0], c[1])
job = execute(circuit, backend, shots=100)
job.result().get_counts(circuit)
###Output
_____no_output_____
###Markdown
There is no output like 01 or 10. Measuring multiqubit systemsMost quantum computers implement local measurements, which means that each qubit is measured separately. So if we have a two qubit system where the first qubit is in the equal superposition and the second one is in $|0\rangle$, that is, we have the state $\frac{1}{\sqrt{2}}(|00\rangle + |01\rangle)$, we will observe 0 and 0 as outcomes of the measurements on the two qubits, or 0 and 1.
###Code
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
What happens if we make measurements on an entangled state? Let's look at the statistics again on the $|\phi^+\rangle$ state:
###Code
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.cx(q[0], q[1])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
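###Markdown
As a quick sanity check (a minimal added sketch, assuming `np` is already imported), we can reproduce these statistics with plain linear algebra: build $|\phi^+\rangle$ as a four-component vector and apply the Born rule to each basis state, which gives probability 0.5 for 00 and 11 and zero for 01 and 10.
###Code
# Sketch: verify the Bell-state statistics with the Born rule
φ_plus = np.array([[1], [0], [0], [1]])/np.sqrt(2)  # |phi+> = (|00> + |11>)/sqrt(2)
for i, label in enumerate(['00', '01', '10', '11']):
    basis_ket = np.zeros((4, 1))
    basis_ket[i] = 1
    # probability of outcome i is |<i|phi+>|^2
    print(label, float(np.abs(basis_ket.T.conj().dot(φ_plus))**2))
###Output
_____no_output_____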
###Markdown
We only observe 00 and 11. Since the state is $\frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)$, this should not come as a shock. Yet, there is something remarkable going on here. At the end of the last section, we saw the same statistics, but from measurements on the same qubit. Now we have two spatially separate qubits exhibiting the same behaviour: this is a very strong form of correlation. This means that if we measure just one qubit, and get, say, 0 as the outcome, we *know* with certainty that if we measured the other qubit, we would also get 0, even though the second measurement is also a random variable. To appreciate this better, imagine that you are tossing two unbiased coins. If you observe heads on one, there is absolutely nothing that you can say about what the other one might be other than a wild guess that holds with probability 0.5. If you play foul and you biased the coins, you might improve your guessing accuracy. Yet you can never say with certainty what the other coin will be based on the outcome you observed on one coin, except for the trivial case when the other coin deterministically gives the same face always. Remarkable as it is, there is no action at a distance or instantaneous (faster than the speed of light) signalling happening between the qubits, though. Your measurement was local to the qubit and so is your information. If there is somebody else doing the measurement on the other qubit, you would have to inform the person through classical communication channels that you happen to know what the outcome will be. So while we certainly cannot violate the theory of relativity with entanglement, this strong form of correlation is still central to many quantum algorithms.

**Mixed states**

If all was clear until now, this is where it gets messy. A ket multiplied by a bra is a projection, as we explained above. More than that, it is also a density matrix. A density matrix is another way of writing a quantum state, instead of kets. So, for instance, we could write $\rho = |\psi\rangle\langle\psi|$, where $\rho$ is the density matrix for $|\psi\rangle$. The Born rule still applies, but now we have to take the trace of the result: for instance, $\mathrm{Tr}[|0\rangle\langle 0|\rho]$ would be the probability of seeing 0. See it in action:
###Code
ψ = np.array([[1], [1]])/np.sqrt(2)
ρ = ψ.dot(ψ.T.conj())
Π_0 = zero_ket.dot(zero_ket.T.conj())
np.trace(Π_0.dot(ρ))
###Output
_____no_output_____
###Markdown
We get one half again. The renormalization after a measurement happens in a similar way: $\frac{|0\rangle\langle 0|\rho|0\rangle\langle 0|}{\mathrm{Tr}[|0\rangle\langle 0|\rho]}$.
###Code
probability_0 = np.trace(Π_0.dot(ρ))
Π_0.dot(ρ).dot(Π_0)/probability_0
###Output
_____no_output_____
###Markdown
So why do we need this at all? Every state we have mentioned so far is called a *pure state*: these are kets or a density matrix created as a ket and a bra. There are other states called *mixed states*: these are *classical* probability distributions over pure states. Formally, a mixed state is written as $\sum_i p_i |\psi_i\rangle\langle\psi_i|$, where $\sum_i p_i=1$, $p_i\geq 0$. This reflects our classical ignorance over the underlying quantum states. Compare the density matrix of the equal superposition $\frac{1}{\sqrt{2}}(|0\rangle+|1\rangle)$ and the mixed state $0.5(|0\rangle\langle 0|+|1\rangle\langle 1|)$:
###Code
zero_ket = np.array([[1], [0]])
one_ket = np.array([[0], [1]])
ψ = (zero_ket + one_ket)/np.sqrt(2)
print("Density matrix of the equal superposition")
print(ψ.dot(ψ.T.conj()))
print("Density matrix of the equally mixed state of |0><0| and |1><1|")
print((zero_ket.dot(zero_ket.T.conj())+one_ket.dot(one_ket.T.conj()))/2)
###Output
_____no_output_____
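###Markdown
As a minimal added sketch of the general definition $\sum_i p_i |\psi_i\rangle\langle\psi_i|$, we can also build a mixture with unequal weights, say $0.7|0\rangle\langle 0| + 0.3|1\rangle\langle 1|$, and check that the Born rule returns exactly the classical probability 0.7 for observing 0:
###Code
# Sketch: a mixed state with unequal classical weights (uses zero_ket, one_ket from above)
ρ_mixed = 0.7*zero_ket.dot(zero_ket.T.conj()) + 0.3*one_ket.dot(one_ket.T.conj())
Π_0 = zero_ket.dot(zero_ket.T.conj())
np.trace(Π_0.dot(ρ_mixed))  # -> 0.7, the weight of |0><0| in the mixture
###Output
_____no_output_____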
###Markdown
A measurement is a central concept in quantum mechanics. An easy way to think about it is as a sample from a probability distribution: it is a random variable with a number of outcomes, and each outcome is produced with a certain probability. Measurements connect the quantum world to our classical one: we cannot directly observe the quantum state in nature, we can only gather statistics about it with measurements. It sounds like a harsh boundary between a quantum and a classical system that can only be bridged by measurement. The reality is more subtle: unless a quantum system is perfectly isolated, it interacts with its surrounding environment. This leads to the introduction of mixed states, which in one limit recover classical probabilities.

**More on the bra-ket notation**

Before we take a deep dive into what measurements are, we need to introduce one more notation to complement the ket: it is called a bra and it is denoted by $\langle\psi|$ for some quantum state $|\psi\rangle$. Together they form the bra-ket or Dirac notation. A bra is the conjugate transpose of a ket, and the other way around. This also means that a bra is a row vector. For instance, this is the bra for $|0\rangle$
###Code
import numpy as np
zero_ket = np.array([[1], [0]])
print("|0> ket:\n", zero_ket)
print("<0| bra:\n", zero_ket.T.conj())
###Output
|0> ket:
[[1]
[0]]
<0| bra:
[[1 0]]
###Markdown
This makes it very easy to write dot products: if we write a bra followed by a ket, that is exactly what the dot product is. This is so common that we often drop one of the vertical bars, and just write $\langle 0|0\rangle$, for instance. Since quantum states are normalized, the inner product of any quantum state with itself is always one:
###Code
zero_ket.T.conj() @ zero_ket
###Output
_____no_output_____
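###Markdown
This holds for any normalized state, not just the basis vectors. As a minimal added check, the same inner product on the equal superposition also gives one:
###Code
# Sketch: <ψ|ψ> = 1 for the normalized equal superposition as well
ψ_check = np.array([[1], [1]])/np.sqrt(2)
ψ_check.T.conj() @ ψ_check
###Output
_____no_output_____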
###Markdown
Similarly, orthogonal vectors always give 0. E.g. $\langle 0|1\rangle$:
###Code
one_ket = np.array([[0], [1]])
zero_ket.T.conj() @ one_ket
###Output
_____no_output_____
###Markdown
What about a ket and a bra? That is going to be a matrix: essentially the outer product of the two vectors. Here's $|0\rangle\langle 0|$:
###Code
zero_ket @ zero_ket.T.conj()
###Output
_____no_output_____
###Markdown
This should look familiar: it is a projection to the first element of the canonical basis. It is true in general that $|\psi\rangle\langle\psi|$ is going to be a projector to $|\psi\rangle$. It is very intuitive: take some other quantum state $|\phi\rangle$ and apply the matrix $|\psi\rangle\langle\psi|$ on it: $|\psi\rangle\langle\psi|\phi\rangle$. Now the right-most two terms are a bra and a ket, so it is a dot product: the overlap between $|\phi\rangle$ and $|\psi\rangle$. Since this is a scalar, it just scales the left-most term, which is the ket $|\psi\rangle$, so in effect, we projected $|\phi\rangle$ on this vector.

**Measurements**

A measurement in quantum mechanics is an operator-valued random variable. The theory of measurements is rich and countless questions about them are still waiting to be answered. Most quantum computers that we have today, however, only implement one very specific measurement, which makes our discussion a lot simpler. This measurement is in the canonical basis. In other words, the measurement contains two projections, $|0\rangle\langle 0|$ and $|1\rangle\langle 1|$, and this measurement can be applied to any of the qubits of the quantum computer. We already saw how applying a projection on a vector works. If we want to make a scalar value of that, we need to add a bra to the left. For instance, for some state $|\psi\rangle$, we get a scalar for $\langle\psi|0\rangle\langle 0|\psi\rangle$. This is called the expectation value of the operator $|0\rangle\langle 0|$. To put this in context, let us apply the projection $|0\rangle\langle 0|$ on the superposition $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$, which is the column vector $\frac{1}{\sqrt{2}}\begin{bmatrix} 1\\ 1\end{bmatrix}$.
###Code
ψ = np.array([[1], [1]])/np.sqrt(2)  # the equal superposition (|0> + |1>)/sqrt(2)
Π_0 = zero_ket @ zero_ket.T.conj()
ψ.T.conj() @ Π_0 @ ψ
###Output
_____no_output_____
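###Markdown
To see the projector itself act on a state (a minimal added sketch): applying $|0\rangle\langle 0|$ to an arbitrary state keeps only its $|0\rangle$ component, scaled by the overlap $\langle 0|\phi\rangle$, exactly as argued above.
###Code
# Sketch: |0><0| acting on φ = 0.6|0> + 0.8|1> returns 0.6|0> (uses Π_0 from above)
φ = np.array([[0.6], [0.8]])
Π_0 @ φ
###Output
_____no_output_____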
###Markdown
That is exactly one half, the square of the absolute value of the probability amplitude corresponding to $|0\rangle$ in the superposition! This is the mathematical formalism of what we had said earlier: given a state $|\psi\rangle = a_0|0\rangle + a_1|1\rangle$, we get an output $i$ with probability $|a_i|^2$. This is known as the *Born rule*. Now we have a recipe to extract probabilities with projections. This is exactly what is implemented in the quantum simulator. The measurement in the simulator is what we described here. Let's create an equal superposition with the Hadamard gate (see a later notebook for quantum circuits), apply the measurement, and observe the statistics:
###Code
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
from qiskit import Aer
from qiskit.tools.visualization import plot_histogram
backend = Aer.get_backend('qasm_simulator')
q = QuantumRegister(1)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
You see that the outcome is random, with roughly half of the outcomes being 0. There is something additional happening. The measurement has a random outcome, but once it is performed, the quantum state is in the corresponding basis vector. That is, the superposition is destroyed. This is referred to as the collapse of the wavefunction. How and why it happens is the subject of many ongoing debates and research efforts, but what matters to us is that we can easily calculate the quantum state after the measurement. Just projecting it to the basis vector is insufficient, since that would not be normalized, so we have to renormalize it. Mathematically it is expressed by the somewhat convoluted expression $\frac{|i\rangle\langle i|\psi\rangle}{\sqrt{\langle\psi|i\rangle\langle i|\psi\rangle}}$ if we observe the output $i$. For instance, if we observe zero after measuring the superposition $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$, the state after the measurement will be
###Code
ψ = np.array([[np.sqrt(2)/2], [np.sqrt(2)/2]])
Π_0 = zero_ket @ zero_ket.T.conj()
probability_0 = ψ.T.conj() @ Π_0 @ ψ
Π_0 @ ψ/np.sqrt(probability_0)
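# Added check (sketch): the renormalized post-measurement state has unit norm
post_ψ = Π_0 @ ψ/np.sqrt(probability_0)
np.linalg.norm(post_ψ)  # -> 1.0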
###Output
_____no_output_____
###Markdown
which is just a very long way of saying we get $|0\rangle$. You can easily see this by putting two measurements in a sequence on the same qubit. The second one will always give the same outcome as the first. The first one is random, but the second one will be determined, since there will be no superposition in the computational basis after the first measurement. Let's simulate this by writing out the results of the two measurements into two different classical registers:
###Code
backend = Aer.get_backend('qasm_simulator')
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q[0], c[0])
circuit.measure(q[0], c[1])
job = execute(circuit, backend, shots=100)
job.result().get_counts(circuit)
###Output
_____no_output_____
###Markdown
There is no output like 01 or 10.

**Measuring multiqubit systems**

Most quantum computers implement local measurements, which means that each qubit is measured separately. So if we have a two qubit system where the first qubit is in the equal superposition and the second one is in $|0\rangle$, that is, we have the state $\frac{1}{\sqrt{2}}(|00\rangle + |01\rangle)$, we will observe 0 and 0 as outcomes of the measurements on the two qubits, or 0 and 1.
###Code
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
What happens if we make measurements on an entangled state? Let's look at the statistics again on the $|\phi^+\rangle$ state:
###Code
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.cx(q[0], q[1])
circuit.measure(q, c)
job = execute(circuit, backend, shots=100)
plot_histogram(job.result().get_counts(circuit))
###Output
_____no_output_____
###Markdown
We only observe 00 and 11. Since the state is $\frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)$, this should not come as a shock. Yet, there is something remarkable going on here. At the end of the last section, we saw the same statistics, but from measurements on the same qubit. Now we have two spatially separate qubits exhibiting the same behaviour: this is a very strong form of correlation. This means that if we measure just one qubit, and get, say, 0 as the outcome, we *know* with certainty that if we measured the other qubit, we would also get 0, even though the second measurement is also a random variable. To appreciate this better, imagine that you are tossing two unbiased coins. If you observe heads on one, there is absolutely nothing that you can say about what the other one might be other than a wild guess that holds with probability 0.5. If you play foul and you biased the coins, you might improve your guessing accuracy. Yet you can never say with certainty what the other coin will be based on the outcome you observed on one coin, except for the trivial case when the other coin deterministically gives the same face always. Remarkable as it is, there is no action at a distance or instantaneous (faster than the speed of light) signalling happening between the qubits, though. Your measurement was local to the qubit and so is your information. If there is somebody else doing the measurement on the other qubit, you would have to inform the person through classical communication channels that you happen to know what the outcome will be. So while we certainly cannot violate the theory of relativity with entanglement, this strong form of correlation is still central to many quantum algorithms.

**Mixed states**

If all was clear until now, this is where it gets messy. A ket multiplied by a bra is a projection, as we explained above. More than that, it is also a density matrix. A density matrix is another way of writing a quantum state, instead of kets. So, for instance, we could write $\rho = |\psi\rangle\langle\psi|$, where $\rho$ is the density matrix for $|\psi\rangle$. The Born rule still applies, but now we have to take the trace of the result: for instance, $\mathrm{Tr}[|0\rangle\langle 0|\rho]$ would be the probability of seeing 0. See it in action:
###Code
ψ = np.array([[1], [1]])/np.sqrt(2)
ρ = ψ @ ψ.T.conj()
Π_0 = zero_ket @ zero_ket.T.conj()
np.trace(Π_0 @ ρ)
###Output
_____no_output_____
###Markdown
We get one half again. The renormalization after a measurement happens in a similar way: $\frac{|0\rangle\langle 0|\rho|0\rangle\langle 0|}{\mathrm{Tr}[|0\rangle\langle 0|\rho]}$.
###Code
probability_0 = np.trace(Π_0 @ ρ)
Π_0 @ ρ @ Π_0/probability_0
###Output
_____no_output_____
###Markdown
So why do we need this at all? Every state we have mentioned so far is called a *pure state*: these are kets or a density matrix created as a ket and a bra. There are other states called *mixed states*: these are *classical* probability distributions over pure states. Formally, a mixed state is written as $\sum_i p_i |\psi_i\rangle\langle\psi_i|$, where $\sum_i p_i=1$, $p_i\geq 0$. This reflects our classical ignorance over the underlying quantum states. Compare the density matrix of the equal superposition $\frac{1}{\sqrt{2}}(|0\rangle+|1\rangle)$ and the mixed state $0.5(|0\rangle\langle 0|+|1\rangle\langle 1|)$:
###Code
zero_ket = np.array([[1], [0]])
one_ket = np.array([[0], [1]])
ψ = (zero_ket + one_ket)/np.sqrt(2)
print("Density matrix of the equal superposition")
print(ψ @ ψ.T.conj())
print("Density matrix of the equally mixed state of |0><0| and |1><1|")
print((zero_ket @ zero_ket.T.conj()+one_ket @ one_ket.T.conj())/2)
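# Added check (sketch): the purity Tr[ρ²] distinguishes them — 1 for the pure state, 0.5 for the mixture
ρ_pure = ψ @ ψ.T.conj()
ρ_mix = (zero_ket @ zero_ket.T.conj() + one_ket @ one_ket.T.conj())/2
print("Purity of the superposition:", np.trace(ρ_pure @ ρ_pure))
print("Purity of the mixed state:", np.trace(ρ_mix @ ρ_mix))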
###Output
_____no_output_____ |
split labels.ipynb | ###Markdown
split each file into a group in a list
###Code
import numpy as np
import pandas as pd
# full_labels is assumed to be loaded earlier as a DataFrame of per-box annotations,
# e.g. full_labels = pd.read_csv('raccoon_labels.csv') (filename hypothetical)
gb = full_labels.groupby('filename')
grouped_list = [gb.get_group(x) for x in gb.groups]
len(grouped_list)
train_index = np.random.choice(len(grouped_list), size=160, replace=False)
test_index = np.setdiff1d(list(range(200)), train_index)
len(train_index), len(test_index)
# randomly assign 160 of the 200 grouped files to train and the remaining 40 to test
train = pd.concat([grouped_list[i] for i in train_index])
test = pd.concat([grouped_list[i] for i in test_index])
len(train), len(test)
train.to_csv('train_labels.csv', index=None)
test.to_csv('test_labels.csv', index=None)
###Output
_____no_output_____
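###Markdown
The same file-level split can be written more compactly with scikit-learn; the following is a minimal added sketch of an equivalent approach, not part of the original workflow. `GroupShuffleSplit` keeps all boxes belonging to one image in the same fold, just like the manual index sampling above.
###Code
# Sketch: equivalent grouped split with scikit-learn (uses full_labels from above)
from sklearn.model_selection import GroupShuffleSplit
splitter = GroupShuffleSplit(n_splits=1, train_size=160, random_state=0)
train_idx, test_idx = next(splitter.split(full_labels, groups=full_labels['filename']))
train_df, test_df = full_labels.iloc[train_idx], full_labels.iloc[test_idx]
len(train_df), len(test_df)
###Output
_____no_output_____ |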
approaches.ipynb | ###Markdown
*missing: Number_Weeks_Used* > *64 unique values*; train: *79858/88858* non-null; test: *53417/59310* non-null
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# the train and test DataFrames are assumed to be loaded earlier in the original workflow,
# e.g. train = pd.read_csv('/content/train.csv') (path hypothetical)
train['Crop_Damage'].value_counts().plot(kind='bar');
train['Crop_Damage'].value_counts()
plt.hist(train['Number_Weeks_Used']);
p=train['Number_Weeks_Used']+train['Number_Weeks_Quit']
p.value_counts().plot(kind='bar',figsize=(30,10));
nulltr= train[train['Number_Weeks_Used'].isnull()].index.tolist()
for i in nulltr:
    # fill missing usage: 0 weeks if no doses were applied, otherwise 42 minus the weeks quit
    # (.loc avoids pandas chained assignment, which can silently fail to write)
    if train.loc[i, 'Number_Doses_Week'] == 0:
        train.loc[i, 'Number_Weeks_Used'] = 0
    else:
        train.loc[i, 'Number_Weeks_Used'] = 42 - train.loc[i, 'Number_Weeks_Quit']
p=test['Number_Weeks_Used']+test['Number_Weeks_Quit']
p.value_counts().plot(kind='bar',figsize=(30,10));
nullte= test[test['Number_Weeks_Used'].isnull()].index.tolist()
for i in nullte:
    # same imputation rule as for the training set, again via .loc
    if test.loc[i, 'Number_Doses_Week'] == 0:
        test.loc[i, 'Number_Weeks_Used'] = 0
    else:
        test.loc[i, 'Number_Weeks_Used'] = 42 - test.loc[i, 'Number_Weeks_Quit']
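# Added alternative (sketch, not in the original run): the same imputation vectorized with np.where —
# 0 where no doses were applied, otherwise 42 minus the number of weeks quit
imputed = np.where(test['Number_Doses_Week'] == 0, 0, 42 - test['Number_Weeks_Quit'])
test['Number_Weeks_Used'] = test['Number_Weeks_Used'].fillna(pd.Series(imputed, index=test.index))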
test.info()
import seaborn as sns
corr = train.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))  # np.bool is deprecated; use the builtin bool
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
t= f.suptitle('Correlation Heatmap', fontsize=20)
# Draw the heatmap with the mask and correct aspect ratio
#sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.5,
# center=0,square=True, linewidths=.5, cbar_kws={"shrink": .5})
hm = sns.heatmap(corr, annot=True, ax=ax, cmap=cmap,fmt='.2f',
linewidths=.1,cbar_kws={"shrink": .7})
dep_var = 'Crop_Damage'
cat_names = [ 'Pesticide_Use_Category' ]
cont_names = ['Estimated_Insects_Count', 'Number_Doses_Week', 'Number_Weeks_Used', 'Number_Weeks_Quit']
###Output
_____no_output_____
###Markdown
***using SVC***
###Code
y=train['Crop_Damage']
y.shape
train=train.drop(columns=['Crop_Damage','ID'])
test=test.drop(columns='ID')
print(train.shape)
train.head()
test.shape
X=pd.get_dummies(train,columns=cat_names)
X.shape
Xt=pd.get_dummies(test,columns=cat_names)
Xt.shape
###Output
_____no_output_____
###Markdown
**Train/test split and class weights**
###Code
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
class_weights= {0:0.89897850367626425, 1:1.709531557182564, 2:4.725742928750448}
from sklearn.svm import SVC
classifier2 = SVC(kernel = 'rbf', random_state =0,class_weight=class_weights ,degree=2,C=10)
classifier2.fit(x_train, y_train)
y_pred= classifier2.predict(x_test)
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
cm
from sklearn.metrics import f1_score, accuracy_score
print('f1-score :', f1_score(y_test,y_pred, average='weighted'))
print('accuracy:', accuracy_score(y_test,y_pred))
#CV
classifier2=SVC(kernel = 'rbf', random_state =0,class_weight=class_weights ,degree=2,C=10)
classifier2.fit(x_train, y_train)
from sklearn.model_selection import cross_val_score
acc=cross_val_score(estimator=classifier2, X=X , y=y , cv=10)
print(acc)
print(acc.mean())
acc.std()
###Output
_____no_output_____
###Markdown
Random Forest
###Code
class_weights= {0:0.89897850367626425, 1:1.709531557182564, 2:4.725742928750448}
from sklearn.ensemble import RandomForestClassifier
classifier3 = RandomForestClassifier(n_estimators=25, max_depth=4, criterion='entropy', class_weight=class_weights, random_state=0)
classifier3.fit(x_train, y_train)
y_pred= classifier3.predict(x_test)
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
cm
from sklearn.metrics import f1_score, accuracy_score
print('f1-score :', f1_score(y_test,y_pred, average='weighted'))
print('accuracy:', accuracy_score(y_test,y_pred))
###Output
_____no_output_____
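###Markdown
A quick way to see what the forest relies on (an added sketch, not part of the original run): tree ensembles expose per-feature importances after fitting.
###Code
# Sketch: rank the columns by how much they drive the random forest's splits
pd.Series(classifier3.feature_importances_, index=x_train.columns).sort_values(ascending=False)
###Output
_____no_output_____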
###Markdown
###Code
from sklearn.utils import class_weight
'''class_weights = list(class_weight.compute_class_weight('balanced',
np.unique(y_train),
y_train))'''
class_weights= [0.89897850367626425, 1.409531557182564, 2.025742928750448]
w_array = np.ones(y_train.shape[0], dtype = 'float')
for i, val in enumerate(y_train):
    # val is already the class label (0, 1 or 2), so it indexes class_weights directly;
    # the original val-1 would have mapped class 0 to the last weight
    w_array[i] = class_weights[val]
set(w_array)
class_weights
from xgboost import XGBClassifier
classifier4=XGBClassifier(n_estimators=625,reg_lambda=0.22,max_depth=5,tree_method='gpu_hist')
classifier4.fit(x_train, y_train,sample_weight=w_array)
y_pred= classifier4.predict(x_test)
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
cm
from sklearn.metrics import f1_score, accuracy_score
print('f1-score :', f1_score(y_test,y_pred, average='weighted'))
print('accuracy:', accuracy_score(y_test,y_pred))
#CV
from xgboost import XGBClassifier
classifier4=XGBClassifier(n_estimators=625,reg_lambda=0.1,max_depth=4,tree_method='gpu_hist')
classifier4.fit(x_train, y_train)
from sklearn.model_selection import cross_val_score
acc=cross_val_score(estimator=classifier4, X=X , y=y , cv=10)
print(acc)
print(acc.mean())
acc.std()
from sklearn.datasets import make_classification
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import classification_report
model = LGBMClassifier(boosting_type='gbdt',objective='multiclass',num_class=3,metric='multi_logloss',subsample=0.8,
n_estimators=665,max_depth=10,learning_rate=0.03,min_data_in_leaf=10,
lambda_l2=0.3,random_seed=42,class_weight= {0: 0.5, 1: 0.36, 2: 0.3},verbose=0)
cv = RepeatedStratifiedKFold(n_splits=3, n_repeats=3, random_state=1)
n_scores = cross_val_score(model, x_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')
print("done")
print('Accuracy: %.3f (%.3f)' % (np.mean(n_scores), np.std(n_scores)))
model.fit(x_train, y_train)
yhat = model.predict(x_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, yhat)
cm
(14713+309)/177772
target_names = ['alive', 'damaged by pesticides', 'damaged by other reason']
print(classification_report(y_test, yhat,target_names=target_names))
ys=model.predict(Xt)
s=pd.read_csv('/content/test.csv')
sub=pd.DataFrame(s['ID'])
sub.head()
sub['Crop_Damage']=ys
sub.head()
sub['Crop_Damage'].value_counts()
sub.to_csv("sub.csv",index=False)
###Output
_____no_output_____ |
t81_558_class_09_4_transfer_nlp.ipynb | ###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Transfer Learning*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb) Google CoLab InstructionsThe following code ensures that Google CoLab is running the correct version of TensorFlow.
###Code
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
###Output
Note: not using Google CoLab
###Markdown
Part 9.4: Transfer Learning for Languages and Keras

You will commonly use transfer learning in conjunction with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, we will look at how you can load a network into Keras for NLP via transfer learning. The following three sources were helpful for the creation of this section.

* Universal sentence encoder [[Cite:cer2018universal]](https://arxiv.org/abs/1803.11175), arXiv preprint arXiv:1803.11175
* [Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)
* [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)

These examples make use of TensorFlow Hub, which allows pretrained models to be loaded into TensorFlow easily. To install TensorFlow Hub, use the following command.
###Code
!pip install tensorflow_hub
###Output
Collecting tensorflow_hub
Downloading tensorflow_hub-0.9.0-py2.py3-none-any.whl (103 kB)
Requirement already satisfied: six>=1.12.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.15.0)
Requirement already satisfied: protobuf>=3.8.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (3.12.3)
Requirement already satisfied: numpy>=1.12.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.18.5)
Requirement already satisfied: setuptools in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.8.0->tensorflow_hub) (49.2.0.post20200714)
Installing collected packages: tensorflow-hub
Successfully installed tensorflow-hub-0.9.0
###Markdown
It is also necessary to install TensorFlow Datasets, which you can install with the following command.
###Code
!pip install tensorflow_datasets
###Output
Collecting tensorflow_datasets
Downloading tensorflow_datasets-3.2.1-py3-none-any.whl (3.4 MB)
Requirement already satisfied: requests>=2.19.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.24.0)
Requirement already satisfied: wrapt in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.12.1)
Collecting dill
Downloading dill-0.3.2.zip (177 kB)
Requirement already satisfied: attrs>=18.1.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (19.3.0)
Requirement already satisfied: absl-py in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.9.0)
Requirement already satisfied: protobuf>=3.6.1 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (3.12.3)
Collecting promise
Downloading promise-2.3.tar.gz (19 kB)
Requirement already satisfied: numpy in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.18.5)
Collecting tensorflow-metadata
Downloading tensorflow_metadata-0.23.0-py3-none-any.whl (43 kB)
Requirement already satisfied: future in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.18.2)
Requirement already satisfied: six in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.15.0)
Requirement already satisfied: tqdm in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (4.47.0)
Requirement already satisfied: termcolor in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.1.0)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2020.6.20)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2.10)
Requirement already satisfied: setuptools in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.6.1->tensorflow_datasets) (49.2.0.post20200714)
Collecting googleapis-common-protos
Downloading googleapis_common_protos-1.52.0-py2.py3-none-any.whl (100 kB)
Building wheels for collected packages: dill, promise
Building wheel for dill (setup.py): started
Building wheel for dill (setup.py): finished with status 'done'
Created wheel for dill: filename=dill-0.3.2-py3-none-any.whl size=78977 sha256=c9ee55462820f4e66c44c76f46eb499453725a7339436b18eb0947153d0d2592
Stored in directory: c:\users\jeffh\appdata\local\pip\cache\wheels\72\6b\d5\5548aa1b73b8c3d176ea13f9f92066b02e82141549d90e2100
Building wheel for promise (setup.py): started
Building wheel for promise (setup.py): finished with status 'done'
###Markdown
Load the Internet Movie DataBase (IMDB) reviews data set. This example is based on a TensorFlow example that you can [find here](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH).
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
train_data, test_data = tfds.load(name="imdb_reviews",
split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
[1mDownloading and preparing dataset imdb_reviews/plain_text/1.0.0 (download: Unknown size, generated: Unknown size, total: Unknown size) to C:\Users\jeffh\tensorflow_datasets\imdb_reviews\plain_text\1.0.0...[0m
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This network was trained by Google on GNEWS data and can convert RAW text into vectors.
###Code
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
###Markdown
Consider the following three movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each to 20-number vectors.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
keras_layer (KerasLayer) (None, 20) 400020
_________________________________________________________________
dense (Dense) (None, 16) 336
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
Train on 15000 samples, validate on 10000 samples
Epoch 1/40
15000/15000 [==============================] - 3s 189us/sample - loss: 0.6388 - accuracy: 0.6433 - val_loss: 0.5910 - val_accuracy: 0.6937
Epoch 2/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.5626 - accuracy: 0.7191 - val_loss: 0.5495 - val_accuracy: 0.7295
Epoch 3/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.5173 - accuracy: 0.7573 - val_loss: 0.5138 - val_accuracy: 0.7585
Epoch 4/40
15000/15000 [==============================] - 2s 145us/sample - loss: 0.4774 - accuracy: 0.7839 - val_loss: 0.4809 - val_accuracy: 0.7832
Epoch 5/40
15000/15000 [==============================] - 2s 146us/sample - loss: 0.4393 - accuracy: 0.8106 - val_loss: 0.4546 - val_accuracy: 0.7973
Epoch 6/40
15000/15000 [==============================] - 2s 146us/sample - loss: 0.4038 - accuracy: 0.8305 - val_loss: 0.4239 - val_accuracy: 0.8155
Epoch 7/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.3702 - accuracy: 0.8500 - val_loss: 0.3997 - val_accuracy: 0.8288
Epoch 8/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.3392 - accuracy: 0.8642 - val_loss: 0.3811 - val_accuracy: 0.8371
Epoch 9/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.3133 - accuracy: 0.8787 - val_loss: 0.3601 - val_accuracy: 0.8462
Epoch 10/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.2876 - accuracy: 0.8895 - val_loss: 0.3457 - val_accuracy: 0.8534
Epoch 11/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.2653 - accuracy: 0.9009 - val_loss: 0.3341 - val_accuracy: 0.8602
Epoch 12/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.2454 - accuracy: 0.9103 - val_loss: 0.3267 - val_accuracy: 0.8635
Epoch 13/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.2280 - accuracy: 0.9178 - val_loss: 0.3174 - val_accuracy: 0.8676
Epoch 14/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.2117 - accuracy: 0.9259 - val_loss: 0.3119 - val_accuracy: 0.8701
Epoch 15/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.1978 - accuracy: 0.9321 - val_loss: 0.3074 - val_accuracy: 0.8715
Epoch 16/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1847 - accuracy: 0.9371 - val_loss: 0.3059 - val_accuracy: 0.8714
Epoch 17/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1725 - accuracy: 0.9426 - val_loss: 0.3024 - val_accuracy: 0.8739
Epoch 18/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.1616 - accuracy: 0.9471 - val_loss: 0.3079 - val_accuracy: 0.8725
Epoch 19/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.1508 - accuracy: 0.9519 - val_loss: 0.3022 - val_accuracy: 0.8747
Epoch 20/40
15000/15000 [==============================] - 2s 145us/sample - loss: 0.1406 - accuracy: 0.9567 - val_loss: 0.3026 - val_accuracy: 0.8745
Epoch 21/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.1317 - accuracy: 0.9593 - val_loss: 0.3043 - val_accuracy: 0.8760
Epoch 22/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1237 - accuracy: 0.9629 - val_loss: 0.3058 - val_accuracy: 0.8763
Epoch 23/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.1154 - accuracy: 0.9668 - val_loss: 0.3081 - val_accuracy: 0.8765
Epoch 24/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1077 - accuracy: 0.9697 - val_loss: 0.3116 - val_accuracy: 0.8773
Epoch 25/40
15000/15000 [==============================] - 2s 140us/sample - loss: 0.1011 - accuracy: 0.9737 - val_loss: 0.3153 - val_accuracy: 0.8759
Epoch 26/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0945 - accuracy: 0.9761 - val_loss: 0.3179 - val_accuracy: 0.8762
Epoch 27/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.0886 - accuracy: 0.9787 - val_loss: 0.3224 - val_accuracy: 0.8757
Epoch 28/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0826 - accuracy: 0.9805 - val_loss: 0.3275 - val_accuracy: 0.8750
Epoch 29/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0768 - accuracy: 0.9828 - val_loss: 0.3322 - val_accuracy: 0.8744
Epoch 30/40
15000/15000 [==============================] - 2s 140us/sample - loss: 0.0718 - accuracy: 0.9851 - val_loss: 0.3376 - val_accuracy: 0.8743
Epoch 31/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0672 - accuracy: 0.9860 - val_loss: 0.3430 - val_accuracy: 0.8747
Epoch 32/40
15000/15000 [==============================] - 2s 140us/sample - loss: 0.0625 - accuracy: 0.9885 - val_loss: 0.3506 - val_accuracy: 0.8723
Epoch 33/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0583 - accuracy: 0.9889 - val_loss: 0.3546 - val_accuracy: 0.8734
Epoch 34/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0546 - accuracy: 0.9903 - val_loss: 0.3619 - val_accuracy: 0.8732
Epoch 35/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.0509 - accuracy: 0.9916 - val_loss: 0.3679 - val_accuracy: 0.8713
Epoch 36/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.0473 - accuracy: 0.9923 - val_loss: 0.3751 - val_accuracy: 0.8721
Epoch 37/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0441 - accuracy: 0.9938 - val_loss: 0.3817 - val_accuracy: 0.8707
Epoch 38/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.0410 - accuracy: 0.9945 - val_loss: 0.3888 - val_accuracy: 0.8700
Epoch 39/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.0384 - accuracy: 0.9949 - val_loss: 0.3966 - val_accuracy: 0.8707
Epoch 40/40
15000/15000 [==============================] - 2s 145us/sample - loss: 0.0359 - accuracy: 0.9955 - val_loss: 0.4033 - val_accuracy: 0.8684
###Markdown
Evaluate the neural network.
###Code
results = model.evaluate(test_examples, test_labels)  # evaluate on the numpy test arrays extracted above
print(results)
history_dict = history.history
history_dict.keys()
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
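###Markdown
As a final check, the trained model can score raw text directly, since the hub layer handles tokenization internally. The following cell is an added sketch; the review strings are made up for illustration. Outputs near 1 indicate a positive prediction and near 0 a negative one.
###Code
# Sketch: sentiment predictions on hand-written reviews (illustrative strings, not from the dataset)
import numpy as np
sample_reviews = np.array(["This movie was absolutely wonderful, I loved every minute.",
                           "A dull, predictable film. Complete waste of time."])
model.predict(sample_reviews)
###Output
_____no_output_____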
###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Transfer Learning*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb) Google CoLab InstructionsThe following code ensures that Google CoLab is running the correct version of TensorFlow.
###Code
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
###Output
Note: not using Google CoLab
###Markdown
Part 9.4: Transfer Learning for Languages and Keras

Transfer learning is commonly used with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, for now we will look at how an NLP network can be loaded into Keras for transfer learning. The following three sources were helpful for the creation of this section.

* Cer, D., Yang, Y., Kong, S. Y., Hua, N., Limtiaco, N., John, R. S., ... & Sung, Y. H. (2018). [Universal sentence encoder](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175.
* [Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)
* [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)

These examples make use of TensorFlow Hub, which allows pretrained models to easily be loaded into TensorFlow. To install TensorFlow Hub, use the following command.
###Code
!pip install tensorflow_hub
###Output
Collecting tensorflow_hub
Using cached https://files.pythonhosted.org/packages/00/0e/a91780d07592b1abf9c91344ce459472cc19db3b67fdf3a61dca6ebb2f5c/tensorflow_hub-0.7.0-py2.py3-none-any.whl
Requirement already satisfied: six>=1.10.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.13.0)
Requirement already satisfied: protobuf>=3.4.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (3.11.2)
Requirement already satisfied: numpy>=1.12.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.17.4)
Requirement already satisfied: setuptools in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.4.0->tensorflow_hub) (42.0.2.post20191203)
Installing collected packages: tensorflow-hub
Successfully installed tensorflow-hub-0.7.0
###Markdown
It is also necessary to install TensorFlow Datasets. This can be done with the following command.
###Code
!pip install tensorflow_datasets
###Output
Collecting tensorflow_datasets
Using cached https://files.pythonhosted.org/packages/8b/2e/77823b3754af2eb2ca6f3676f91741fafa44bc653cfafc267c70afd7355f/tensorflow_datasets-1.3.2-py3-none-any.whl
Requirement already satisfied: tqdm in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (4.40.2)
Requirement already satisfied: future in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.18.2)
Requirement already satisfied: attrs in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (19.3.0)
Collecting tensorflow-metadata
Using cached https://files.pythonhosted.org/packages/3b/0c/afb81ea6998f6e26521671585d1cd9d3f7945a8b9834764e91757453dc25/tensorflow_metadata-0.15.2-py2.py3-none-any.whl
Collecting promise
Using cached https://files.pythonhosted.org/packages/cf/9c/fb5d48abfe5d791cd496e4242ebcf87a4bb2e0c3dcd6e0ae68c11426a528/promise-2.3.tar.gz
Requirement already satisfied: termcolor in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.1.0)
Requirement already satisfied: wrapt in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.11.2)
Requirement already satisfied: absl-py in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.8.1)
Requirement already satisfied: protobuf>=3.6.1 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (3.11.2)
Requirement already satisfied: six in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.13.0)
Requirement already satisfied: numpy in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.17.4)
Requirement already satisfied: requests>=2.19.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.22.0)
Collecting dill
Using cached https://files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz
Collecting googleapis-common-protos
Using cached https://files.pythonhosted.org/packages/eb/ee/e59e74ecac678a14d6abefb9054f0bbcb318a6452a30df3776f133886d7d/googleapis-common-protos-1.6.0.tar.gz
Requirement already satisfied: setuptools in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.6.1->tensorflow_datasets) (42.0.2.post20191203)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2019.11.28)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (1.24.3)
Requirement already satisfied: idna<2.9,>=2.5 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2.8)
Building wheels for collected packages: promise, dill, googleapis-common-protos
Building wheel for promise (setup.py): started
Building wheel for promise (setup.py): finished with status 'done'
Created wheel for promise: filename=promise-2.3-cp37-none-any.whl size=21500 sha256=45ca497494456f4971c99c6b3e5e4df9410bf5a2f634e93b02b6771ba9475c20
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\19\49\34\c3c1e78bcb954c49e5ec0d31784fe63d14d427f316b12fbde9
Building wheel for dill (setup.py): started
Building wheel for dill (setup.py): finished with status 'done'
Created wheel for dill: filename=dill-0.3.1.1-cp37-none-any.whl size=78598 sha256=c65010bd933b8ee5e37109931467dad265b5053f88d714bb4aab373a2755e78b
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\59\b1\91\f02e76c732915c4015ab4010f3015469866c1eb9b14058d8e7
Building wheel for googleapis-common-protos (setup.py): started
Building wheel for googleapis-common-protos (setup.py): finished with status 'done'
Created wheel for googleapis-common-protos: filename=googleapis_common_protos-1.6.0-cp37-none-any.whl size=77585 sha256=a6964bed5912b68bd2b1887a9c8d8b637a49af9aa029c23ebbae36905fafee92
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\9e\3d\a2\1bec8bb7db80ab3216dbc33092bb7ccd0debfb8ba42b5668d5
Successfully built promise dill googleapis-common-protos
Installing collected packages: googleapis-common-protos, tensorflow-metadata, promise, dill, tensorflow-datasets
Successfully installed dill-0.3.1.1 googleapis-common-protos-1.6.0 promise-2.3 tensorflow-datasets-1.3.2 tensorflow-metadata-0.15.2
###Markdown
This example is based on a TensorFlow example that you can [find here](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH). Load the Internet Movie DataBase (IMDB) reviews data set.
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
train_data, test_data = tfds.load(name="imdb_reviews", split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
[1mDownloading and preparing dataset imdb_reviews (80.23 MiB) to C:\Users\jheaton\tensorflow_datasets\imdb_reviews\plain_text\0.1.0...[0m
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This was trained by Google on gnews data and can convert RAW text into vectors.
###Code
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
###Markdown
Consider the following 3 movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each to 20-number vectors.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
keras_layer (KerasLayer) (None, 20) 400020
_________________________________________________________________
dense (Dense) (None, 16) 336
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
Train on 15000 samples, validate on 10000 samples
Epoch 1/40
15000/15000 [==============================] - 6s 379us/sample - loss: 0.6585 - accuracy: 0.6415 - val_loss: 0.6056 - val_accuracy: 0.6739
Epoch 2/40
15000/15000 [==============================] - 4s 272us/sample - loss: 0.5759 - accuracy: 0.7069 - val_loss: 0.5587 - val_accuracy: 0.7135
Epoch 3/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.5284 - accuracy: 0.7452 - val_loss: 0.5209 - val_accuracy: 0.7444
Epoch 4/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.4847 - accuracy: 0.7751 - val_loss: 0.4853 - val_accuracy: 0.7706
Epoch 5/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.4425 - accuracy: 0.8038 - val_loss: 0.4547 - val_accuracy: 0.7879
Epoch 6/40
15000/15000 [==============================] - 4s 257us/sample - loss: 0.4033 - accuracy: 0.8279 - val_loss: 0.4214 - val_accuracy: 0.8090
Epoch 7/40
15000/15000 [==============================] - 4s 262us/sample - loss: 0.3679 - accuracy: 0.8496 - val_loss: 0.3983 - val_accuracy: 0.8191
Epoch 8/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.3387 - accuracy: 0.8635 - val_loss: 0.3757 - val_accuracy: 0.8323
Epoch 9/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.3111 - accuracy: 0.8779 - val_loss: 0.3649 - val_accuracy: 0.8371
Epoch 10/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.2890 - accuracy: 0.8879 - val_loss: 0.3502 - val_accuracy: 0.8466
Epoch 11/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.2658 - accuracy: 0.9000 - val_loss: 0.3353 - val_accuracy: 0.8539
Epoch 12/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.2462 - accuracy: 0.9089 - val_loss: 0.3265 - val_accuracy: 0.8594
Epoch 13/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.2300 - accuracy: 0.9163 - val_loss: 0.3196 - val_accuracy: 0.8638
Epoch 14/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.2146 - accuracy: 0.9235 - val_loss: 0.3143 - val_accuracy: 0.8657
Epoch 15/40
15000/15000 [==============================] - 4s 270us/sample - loss: 0.1997 - accuracy: 0.9303 - val_loss: 0.3104 - val_accuracy: 0.8697
Epoch 16/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1866 - accuracy: 0.9359 - val_loss: 0.3097 - val_accuracy: 0.8686
Epoch 17/40
15000/15000 [==============================] - 4s 269us/sample - loss: 0.1750 - accuracy: 0.9407 - val_loss: 0.3060 - val_accuracy: 0.8732
Epoch 18/40
15000/15000 [==============================] - 4s 277us/sample - loss: 0.1674 - accuracy: 0.9443 - val_loss: 0.3050 - val_accuracy: 0.8744
Epoch 19/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1537 - accuracy: 0.9492 - val_loss: 0.3069 - val_accuracy: 0.8718
Epoch 20/40
15000/15000 [==============================] - 4s 269us/sample - loss: 0.1445 - accuracy: 0.9533 - val_loss: 0.3108 - val_accuracy: 0.8711
Epoch 21/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.1355 - accuracy: 0.9573 - val_loss: 0.3066 - val_accuracy: 0.8757
Epoch 22/40
15000/15000 [==============================] - 4s 262us/sample - loss: 0.1262 - accuracy: 0.9611 - val_loss: 0.3087 - val_accuracy: 0.8759
Epoch 23/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1185 - accuracy: 0.9655 - val_loss: 0.3112 - val_accuracy: 0.8765
Epoch 24/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.1108 - accuracy: 0.9688 - val_loss: 0.3140 - val_accuracy: 0.8758
Epoch 25/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1038 - accuracy: 0.9731 - val_loss: 0.3174 - val_accuracy: 0.8748
Epoch 26/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.0970 - accuracy: 0.9755 - val_loss: 0.3215 - val_accuracy: 0.8751
Epoch 27/40
15000/15000 [==============================] - 4s 263us/sample - loss: 0.0912 - accuracy: 0.9774 - val_loss: 0.3264 - val_accuracy: 0.8746
Epoch 28/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0852 - accuracy: 0.9799 - val_loss: 0.3294 - val_accuracy: 0.8751
Epoch 29/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0799 - accuracy: 0.9820 - val_loss: 0.3340 - val_accuracy: 0.8756
Epoch 30/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0748 - accuracy: 0.9835 - val_loss: 0.3393 - val_accuracy: 0.8758
Epoch 31/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0696 - accuracy: 0.9856 - val_loss: 0.3466 - val_accuracy: 0.8722
Epoch 32/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0653 - accuracy: 0.9871 - val_loss: 0.3516 - val_accuracy: 0.8731
Epoch 33/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0608 - accuracy: 0.9891 - val_loss: 0.3582 - val_accuracy: 0.8706
Epoch 34/40
15000/15000 [==============================] - 4s 263us/sample - loss: 0.0567 - accuracy: 0.9905 - val_loss: 0.3623 - val_accuracy: 0.8731
Epoch 35/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.0531 - accuracy: 0.9912 - val_loss: 0.3709 - val_accuracy: 0.8706
Epoch 36/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.0497 - accuracy: 0.9921 - val_loss: 0.3770 - val_accuracy: 0.8709
Epoch 37/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.0460 - accuracy: 0.9933 - val_loss: 0.3818 - val_accuracy: 0.8715
Epoch 38/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.0434 - accuracy: 0.9945 - val_loss: 0.3878 - val_accuracy: 0.8722
Epoch 39/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.0402 - accuracy: 0.9955 - val_loss: 0.3982 - val_accuracy: 0.8694
Epoch 40/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.0372 - accuracy: 0.9959 - val_loss: 0.4039 - val_accuracy: 0.8691
###Markdown
Evaluate the neural network. Note from the training log above that validation loss bottoms out around epochs 17–18 and then rises while training loss keeps falling; this overfitting is what the plots below make visible.
###Code
results = model.evaluate(test_examples, test_labels)  # use the numpy arrays created above, not the raw tfds split
print(results)
history_dict = history.history
history_dict.keys()
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
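###Markdown
Because the hub layer consumes raw strings, the trained model can score unseen review text directly. A minimal sketch (the two sample sentences are made up for illustration):
###Code
import numpy as np

samples = np.array(["A wonderful, heartfelt film with terrific acting.",
                    "Dull plot, wooden dialogue, and a wasted cast."])
probs = model.predict(samples)  # sigmoid outputs in [0, 1]
for text, p in zip(samples, probs[:, 0]):
    print(f"{p:.3f} probability positive: {text}")
###Output
_____no_output_____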
###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Regularization: L1, L2 and Dropout*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb) Google CoLab InstructionsThe following code ensures that Google CoLab is running the correct version of TensorFlow.
###Code
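# The %tensorflow_version magic exists only in Colab, so the except
# branch is how a local run is detected.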
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
###Output
Note: not using Google CoLab
###Markdown
Part 9.4: Transfer Learning for Languages and KerasTransfer learning is commonly used with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, for now we will look at how an NLP network can be loaded into Keras for transfer learning. The following three sources were helpful for the creation of this section.* Cer, D., Yang, Y., Kong, S. Y., Hua, N., Limtiaco, N., John, R. S., ... & Sung, Y. H. (2018). [Universal sentence encoder](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175.* [Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)* [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)These examples make use of TensorFlow Hub, which allows pretrained models to easily be loaded into TensorFlow. To install TensorFlow Hub, use the following command.
###Code
!pip install tensorflow_hub
###Output
Collecting tensorflow_hub
Using cached https://files.pythonhosted.org/packages/00/0e/a91780d07592b1abf9c91344ce459472cc19db3b67fdf3a61dca6ebb2f5c/tensorflow_hub-0.7.0-py2.py3-none-any.whl
Requirement already satisfied: six>=1.10.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.13.0)
Requirement already satisfied: protobuf>=3.4.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (3.11.2)
Requirement already satisfied: numpy>=1.12.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.17.4)
Requirement already satisfied: setuptools in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.4.0->tensorflow_hub) (42.0.2.post20191203)
Installing collected packages: tensorflow-hub
Successfully installed tensorflow-hub-0.7.0
###Markdown
It is also necessary to install TensorFlow Datasets. This can be done with the following command.
###Code
!pip install tensorflow_datasets
###Output
Collecting tensorflow_datasets
Using cached https://files.pythonhosted.org/packages/8b/2e/77823b3754af2eb2ca6f3676f91741fafa44bc653cfafc267c70afd7355f/tensorflow_datasets-1.3.2-py3-none-any.whl
Requirement already satisfied: tqdm in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (4.40.2)
Requirement already satisfied: future in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.18.2)
Requirement already satisfied: attrs in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (19.3.0)
Collecting tensorflow-metadata
Using cached https://files.pythonhosted.org/packages/3b/0c/afb81ea6998f6e26521671585d1cd9d3f7945a8b9834764e91757453dc25/tensorflow_metadata-0.15.2-py2.py3-none-any.whl
Collecting promise
Using cached https://files.pythonhosted.org/packages/cf/9c/fb5d48abfe5d791cd496e4242ebcf87a4bb2e0c3dcd6e0ae68c11426a528/promise-2.3.tar.gz
Requirement already satisfied: termcolor in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.1.0)
Requirement already satisfied: wrapt in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.11.2)
Requirement already satisfied: absl-py in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.8.1)
Requirement already satisfied: protobuf>=3.6.1 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (3.11.2)
Requirement already satisfied: six in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.13.0)
Requirement already satisfied: numpy in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.17.4)
Requirement already satisfied: requests>=2.19.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.22.0)
Collecting dill
Using cached https://files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz
Collecting googleapis-common-protos
Using cached https://files.pythonhosted.org/packages/eb/ee/e59e74ecac678a14d6abefb9054f0bbcb318a6452a30df3776f133886d7d/googleapis-common-protos-1.6.0.tar.gz
Requirement already satisfied: setuptools in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.6.1->tensorflow_datasets) (42.0.2.post20191203)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2019.11.28)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (1.24.3)
Requirement already satisfied: idna<2.9,>=2.5 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2.8)
Building wheels for collected packages: promise, dill, googleapis-common-protos
Building wheel for promise (setup.py): started
Building wheel for promise (setup.py): finished with status 'done'
Created wheel for promise: filename=promise-2.3-cp37-none-any.whl size=21500 sha256=45ca497494456f4971c99c6b3e5e4df9410bf5a2f634e93b02b6771ba9475c20
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\19\49\34\c3c1e78bcb954c49e5ec0d31784fe63d14d427f316b12fbde9
Building wheel for dill (setup.py): started
Building wheel for dill (setup.py): finished with status 'done'
Created wheel for dill: filename=dill-0.3.1.1-cp37-none-any.whl size=78598 sha256=c65010bd933b8ee5e37109931467dad265b5053f88d714bb4aab373a2755e78b
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\59\b1\91\f02e76c732915c4015ab4010f3015469866c1eb9b14058d8e7
Building wheel for googleapis-common-protos (setup.py): started
Building wheel for googleapis-common-protos (setup.py): finished with status 'done'
Created wheel for googleapis-common-protos: filename=googleapis_common_protos-1.6.0-cp37-none-any.whl size=77585 sha256=a6964bed5912b68bd2b1887a9c8d8b637a49af9aa029c23ebbae36905fafee92
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\9e\3d\a2\1bec8bb7db80ab3216dbc33092bb7ccd0debfb8ba42b5668d5
Successfully built promise dill googleapis-common-protos
Installing collected packages: googleapis-common-protos, tensorflow-metadata, promise, dill, tensorflow-datasets
Successfully installed dill-0.3.1.1 googleapis-common-protos-1.6.0 promise-2.3 tensorflow-datasets-1.3.2 tensorflow-metadata-0.15.2
###Markdown
Load the Internet Movie DataBase (IMDB) reviews data set. This example is based on a TensorFlow example that you can [find here](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH).
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
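# batch_size=-1 loads each split as a single in-memory batch;
# as_supervised=True yields (text, label) pairs, converted to numpy below.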
train_data, test_data = tfds.load(name="imdb_reviews", split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
Downloading and preparing dataset imdb_reviews (80.23 MiB) to C:\Users\jheaton\tensorflow_datasets\imdb_reviews\plain_text\0.1.0...
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This model was trained by Google on GNews data and can convert raw text into vectors.
###Code
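# output_shape=[20] declares the embedding width; trainable=True lets the
# pretrained weights fine-tune along with the new classifier head.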
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
###Markdown
Consider the following three movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each review to a 20-number vector.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
keras_layer (KerasLayer) (None, 20) 400020
_________________________________________________________________
dense (Dense) (None, 16) 336
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
Train on 15000 samples, validate on 10000 samples
Epoch 1/40
15000/15000 [==============================] - 6s 379us/sample - loss: 0.6585 - accuracy: 0.6415 - val_loss: 0.6056 - val_accuracy: 0.6739
Epoch 2/40
15000/15000 [==============================] - 4s 272us/sample - loss: 0.5759 - accuracy: 0.7069 - val_loss: 0.5587 - val_accuracy: 0.7135
Epoch 3/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.5284 - accuracy: 0.7452 - val_loss: 0.5209 - val_accuracy: 0.7444
Epoch 4/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.4847 - accuracy: 0.7751 - val_loss: 0.4853 - val_accuracy: 0.7706
Epoch 5/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.4425 - accuracy: 0.8038 - val_loss: 0.4547 - val_accuracy: 0.7879
Epoch 6/40
15000/15000 [==============================] - 4s 257us/sample - loss: 0.4033 - accuracy: 0.8279 - val_loss: 0.4214 - val_accuracy: 0.8090
Epoch 7/40
15000/15000 [==============================] - 4s 262us/sample - loss: 0.3679 - accuracy: 0.8496 - val_loss: 0.3983 - val_accuracy: 0.8191
Epoch 8/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.3387 - accuracy: 0.8635 - val_loss: 0.3757 - val_accuracy: 0.8323
Epoch 9/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.3111 - accuracy: 0.8779 - val_loss: 0.3649 - val_accuracy: 0.8371
Epoch 10/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.2890 - accuracy: 0.8879 - val_loss: 0.3502 - val_accuracy: 0.8466
Epoch 11/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.2658 - accuracy: 0.9000 - val_loss: 0.3353 - val_accuracy: 0.8539
Epoch 12/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.2462 - accuracy: 0.9089 - val_loss: 0.3265 - val_accuracy: 0.8594
Epoch 13/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.2300 - accuracy: 0.9163 - val_loss: 0.3196 - val_accuracy: 0.8638
Epoch 14/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.2146 - accuracy: 0.9235 - val_loss: 0.3143 - val_accuracy: 0.8657
Epoch 15/40
15000/15000 [==============================] - 4s 270us/sample - loss: 0.1997 - accuracy: 0.9303 - val_loss: 0.3104 - val_accuracy: 0.8697
Epoch 16/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1866 - accuracy: 0.9359 - val_loss: 0.3097 - val_accuracy: 0.8686
Epoch 17/40
15000/15000 [==============================] - 4s 269us/sample - loss: 0.1750 - accuracy: 0.9407 - val_loss: 0.3060 - val_accuracy: 0.8732
Epoch 18/40
15000/15000 [==============================] - 4s 277us/sample - loss: 0.1674 - accuracy: 0.9443 - val_loss: 0.3050 - val_accuracy: 0.8744
Epoch 19/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1537 - accuracy: 0.9492 - val_loss: 0.3069 - val_accuracy: 0.8718
Epoch 20/40
15000/15000 [==============================] - 4s 269us/sample - loss: 0.1445 - accuracy: 0.9533 - val_loss: 0.3108 - val_accuracy: 0.8711
Epoch 21/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.1355 - accuracy: 0.9573 - val_loss: 0.3066 - val_accuracy: 0.8757
Epoch 22/40
15000/15000 [==============================] - 4s 262us/sample - loss: 0.1262 - accuracy: 0.9611 - val_loss: 0.3087 - val_accuracy: 0.8759
Epoch 23/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1185 - accuracy: 0.9655 - val_loss: 0.3112 - val_accuracy: 0.8765
Epoch 24/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.1108 - accuracy: 0.9688 - val_loss: 0.3140 - val_accuracy: 0.8758
Epoch 25/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1038 - accuracy: 0.9731 - val_loss: 0.3174 - val_accuracy: 0.8748
Epoch 26/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.0970 - accuracy: 0.9755 - val_loss: 0.3215 - val_accuracy: 0.8751
Epoch 27/40
15000/15000 [==============================] - 4s 263us/sample - loss: 0.0912 - accuracy: 0.9774 - val_loss: 0.3264 - val_accuracy: 0.8746
Epoch 28/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0852 - accuracy: 0.9799 - val_loss: 0.3294 - val_accuracy: 0.8751
Epoch 29/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0799 - accuracy: 0.9820 - val_loss: 0.3340 - val_accuracy: 0.8756
Epoch 30/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0748 - accuracy: 0.9835 - val_loss: 0.3393 - val_accuracy: 0.8758
Epoch 31/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0696 - accuracy: 0.9856 - val_loss: 0.3466 - val_accuracy: 0.8722
Epoch 32/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0653 - accuracy: 0.9871 - val_loss: 0.3516 - val_accuracy: 0.8731
Epoch 33/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0608 - accuracy: 0.9891 - val_loss: 0.3582 - val_accuracy: 0.8706
Epoch 34/40
15000/15000 [==============================] - 4s 263us/sample - loss: 0.0567 - accuracy: 0.9905 - val_loss: 0.3623 - val_accuracy: 0.8731
Epoch 35/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.0531 - accuracy: 0.9912 - val_loss: 0.3709 - val_accuracy: 0.8706
Epoch 36/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.0497 - accuracy: 0.9921 - val_loss: 0.3770 - val_accuracy: 0.8709
Epoch 37/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.0460 - accuracy: 0.9933 - val_loss: 0.3818 - val_accuracy: 0.8715
Epoch 38/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.0434 - accuracy: 0.9945 - val_loss: 0.3878 - val_accuracy: 0.8722
Epoch 39/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.0402 - accuracy: 0.9955 - val_loss: 0.3982 - val_accuracy: 0.8694
Epoch 40/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.0372 - accuracy: 0.9959 - val_loss: 0.4039 - val_accuracy: 0.8691
###Markdown
Evaluate the neural network.
###Code
results = model.evaluate(test_examples, test_labels)  # use the numpy arrays created above, not the raw tfds split
print(results)
history_dict = history.history
history_dict.keys()
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
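###Markdown
`model.evaluate` returns metrics in the order they were compiled, so `results` above is `[loss, accuracy]`. A small sketch of printing them by name (assuming the cell above has run):
###Code
for name, value in zip(model.metrics_names, results):
    print(f"{name}: {value:.4f}")
###Output
_____no_output_____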
###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Regularization: L1, L2 and Dropout*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb) Part 9.4: Transfer Learning for Languages and KerasTransfer learning is commonly used with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, for now we will look how a NLP network can be loaded into Keras for transfer learning. The following three sources were helpful for the creation of this section.* Cer, D., Yang, Y., Kong, S. Y., Hua, N., Limtiaco, N., John, R. S., ... & Sung, Y. H. (2018). [Universal sentence encoder](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175.* [Deep Transfer Learning for Natural Language Processing — Text Classification with Universal Embeddings](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)* [Keras Tutorial - How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)These examples make use of TensorFlow Hub, which allows pretrained models to easily be loaded into TensorFlow. To install TensorHub use the following command.
###Code
!pip install tensorflow_hub
###Output
Requirement already satisfied: tensorflow_hub in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (0.5.0)
Requirement already satisfied: numpy>=1.12.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.16.4)
Requirement already satisfied: six>=1.10.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.12.0)
Requirement already satisfied: protobuf>=3.4.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (3.9.1)
Requirement already satisfied: setuptools in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.4.0->tensorflow_hub) (41.0.1)
###Markdown
It is also necessary to install TensorFlow Datasets. This can be done with the following command.
###Code
!pip install tensorflow_datasets
###Output
Requirement already satisfied: tensorflow_datasets in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (1.1.0)
Requirement already satisfied: tensorflow-metadata in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.14.0)
Requirement already satisfied: requests>=2.19.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.22.0)
Requirement already satisfied: promise in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.2.1)
Requirement already satisfied: dill in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.3.0)
Requirement already satisfied: protobuf>=3.6.1 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (3.9.1)
Requirement already satisfied: tqdm in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (4.33.0)
Requirement already satisfied: six in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.12.0)
Requirement already satisfied: wrapt in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.11.2)
Requirement already satisfied: numpy in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.16.4)
Requirement already satisfied: psutil in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (5.6.3)
Requirement already satisfied: termcolor in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.1.0)
Requirement already satisfied: future in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.17.1)
Requirement already satisfied: attrs in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (19.1.0)
Requirement already satisfied: absl-py in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.7.1)
Requirement already satisfied: googleapis-common-protos in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow-metadata->tensorflow_datasets) (1.6.0)
Requirement already satisfied: idna<2.9,>=2.5 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2.8)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2019.6.16)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (1.25.3)
Requirement already satisfied: setuptools in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.6.1->tensorflow_datasets) (41.0.1)
###Markdown
Load the Internet Movie DataBase (IMDB) reviews data set. This example is based on a TensorFlow example that you can [find here](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH).
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
train_data, test_data = tfds.load(name="imdb_reviews", split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
WARNING: Logging before flag parsing goes to stderr.
W0819 16:45:54.504038 19696 dataset_builder.py:439] Warning: Setting shuffle_files=True because split=TRAIN and shuffle_files=None. This behavior will be deprecated on 2019-08-06, at which point shuffle_files=False will be the default for all splits.
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This model was trained by Google on GNews data and can convert raw text into vectors.
###Code
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
###Markdown
Consider the following three movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each review to a 20-number vector.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
keras_layer (KerasLayer) (None, 20) 400020
_________________________________________________________________
dense (Dense) (None, 16) 336
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
W0819 16:45:57.484114 19696 training_utils.py:1210] When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.
###Markdown
Evaluate the neural network.
###Code
results = model.evaluate(test_examples, test_labels)  # use the numpy arrays created above, not the raw tfds split
print(results)
history_dict = history.history
history_dict.keys()
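# typically dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])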
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Regularization: L1, L2 and Dropout*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb) Google CoLab InstructionsThe following code ensures that Google CoLab is running the correct version of TensorFlow.
###Code
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
###Output
Note: not using Google CoLab
###Markdown
Part 9.4: Transfer Learning for Languages and KerasYou will commonly use transfer learning in conjunction with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, we will look at how you can load a network into Keras for NLP via transfer learning. The following three sources were helpful for the creation of this section.* Universal sentence encoder [[Cite:cer2018universal]](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175.* Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings [[Cite:howard2018universal]](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)* [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)These examples make use of TensorFlow Hub, which allows pretrained models to be loaded into TensorFlow easily. To install TensorFlow Hub, use the following command.
###Code
!pip install tensorflow_hub
###Output
Collecting tensorflow_hub
Using cached https://files.pythonhosted.org/packages/00/0e/a91780d07592b1abf9c91344ce459472cc19db3b67fdf3a61dca6ebb2f5c/tensorflow_hub-0.7.0-py2.py3-none-any.whl
Requirement already satisfied: six>=1.10.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.13.0)
Requirement already satisfied: protobuf>=3.4.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (3.11.2)
Requirement already satisfied: numpy>=1.12.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.17.4)
Requirement already satisfied: setuptools in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.4.0->tensorflow_hub) (42.0.2.post20191203)
Installing collected packages: tensorflow-hub
Successfully installed tensorflow-hub-0.7.0
###Markdown
It is also necessary to install TensorFlow Datasets, which you can install with the following command.
###Code
!pip install tensorflow_datasets
###Output
Collecting tensorflow_datasets
Using cached https://files.pythonhosted.org/packages/8b/2e/77823b3754af2eb2ca6f3676f91741fafa44bc653cfafc267c70afd7355f/tensorflow_datasets-1.3.2-py3-none-any.whl
Requirement already satisfied: tqdm in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (4.40.2)
Requirement already satisfied: future in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.18.2)
Requirement already satisfied: attrs in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (19.3.0)
Collecting tensorflow-metadata
Using cached https://files.pythonhosted.org/packages/3b/0c/afb81ea6998f6e26521671585d1cd9d3f7945a8b9834764e91757453dc25/tensorflow_metadata-0.15.2-py2.py3-none-any.whl
Collecting promise
Using cached https://files.pythonhosted.org/packages/cf/9c/fb5d48abfe5d791cd496e4242ebcf87a4bb2e0c3dcd6e0ae68c11426a528/promise-2.3.tar.gz
Requirement already satisfied: termcolor in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.1.0)
Requirement already satisfied: wrapt in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.11.2)
Requirement already satisfied: absl-py in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.8.1)
Requirement already satisfied: protobuf>=3.6.1 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (3.11.2)
Requirement already satisfied: six in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.13.0)
Requirement already satisfied: numpy in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.17.4)
Requirement already satisfied: requests>=2.19.0 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.22.0)
Collecting dill
Using cached https://files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz
Collecting googleapis-common-protos
Using cached https://files.pythonhosted.org/packages/eb/ee/e59e74ecac678a14d6abefb9054f0bbcb318a6452a30df3776f133886d7d/googleapis-common-protos-1.6.0.tar.gz
Requirement already satisfied: setuptools in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.6.1->tensorflow_datasets) (42.0.2.post20191203)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2019.11.28)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (1.24.3)
Requirement already satisfied: idna<2.9,>=2.5 in c:\users\jheaton\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2.8)
Building wheels for collected packages: promise, dill, googleapis-common-protos
Building wheel for promise (setup.py): started
Building wheel for promise (setup.py): finished with status 'done'
Created wheel for promise: filename=promise-2.3-cp37-none-any.whl size=21500 sha256=45ca497494456f4971c99c6b3e5e4df9410bf5a2f634e93b02b6771ba9475c20
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\19\49\34\c3c1e78bcb954c49e5ec0d31784fe63d14d427f316b12fbde9
Building wheel for dill (setup.py): started
Building wheel for dill (setup.py): finished with status 'done'
Created wheel for dill: filename=dill-0.3.1.1-cp37-none-any.whl size=78598 sha256=c65010bd933b8ee5e37109931467dad265b5053f88d714bb4aab373a2755e78b
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\59\b1\91\f02e76c732915c4015ab4010f3015469866c1eb9b14058d8e7
Building wheel for googleapis-common-protos (setup.py): started
Building wheel for googleapis-common-protos (setup.py): finished with status 'done'
Created wheel for googleapis-common-protos: filename=googleapis_common_protos-1.6.0-cp37-none-any.whl size=77585 sha256=a6964bed5912b68bd2b1887a9c8d8b637a49af9aa029c23ebbae36905fafee92
Stored in directory: C:\Users\jheaton\AppData\Local\pip\Cache\wheels\9e\3d\a2\1bec8bb7db80ab3216dbc33092bb7ccd0debfb8ba42b5668d5
Successfully built promise dill googleapis-common-protos
Installing collected packages: googleapis-common-protos, tensorflow-metadata, promise, dill, tensorflow-datasets
Successfully installed dill-0.3.1.1 googleapis-common-protos-1.6.0 promise-2.3 tensorflow-datasets-1.3.2 tensorflow-metadata-0.15.2
###Markdown
Load the Internet Movie DataBase (IMDB) reviews data set. This example is based on a TensorFlow example that you can [find here](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH).
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
train_data, test_data = tfds.load(name="imdb_reviews", split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
Downloading and preparing dataset imdb_reviews (80.23 MiB) to C:\Users\jheaton\tensorflow_datasets\imdb_reviews\plain_text\0.1.0...
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This network was trained by Google on GNews data and can convert raw text into vectors.
###Code
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
###Markdown
Consider the following three movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each review to a 20-number vector.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
keras_layer (KerasLayer) (None, 20) 400020
_________________________________________________________________
dense (Dense) (None, 16) 336
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
Train on 15000 samples, validate on 10000 samples
Epoch 1/40
15000/15000 [==============================] - 6s 379us/sample - loss: 0.6585 - accuracy: 0.6415 - val_loss: 0.6056 - val_accuracy: 0.6739
Epoch 2/40
15000/15000 [==============================] - 4s 272us/sample - loss: 0.5759 - accuracy: 0.7069 - val_loss: 0.5587 - val_accuracy: 0.7135
Epoch 3/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.5284 - accuracy: 0.7452 - val_loss: 0.5209 - val_accuracy: 0.7444
Epoch 4/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.4847 - accuracy: 0.7751 - val_loss: 0.4853 - val_accuracy: 0.7706
Epoch 5/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.4425 - accuracy: 0.8038 - val_loss: 0.4547 - val_accuracy: 0.7879
Epoch 6/40
15000/15000 [==============================] - 4s 257us/sample - loss: 0.4033 - accuracy: 0.8279 - val_loss: 0.4214 - val_accuracy: 0.8090
Epoch 7/40
15000/15000 [==============================] - 4s 262us/sample - loss: 0.3679 - accuracy: 0.8496 - val_loss: 0.3983 - val_accuracy: 0.8191
Epoch 8/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.3387 - accuracy: 0.8635 - val_loss: 0.3757 - val_accuracy: 0.8323
Epoch 9/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.3111 - accuracy: 0.8779 - val_loss: 0.3649 - val_accuracy: 0.8371
Epoch 10/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.2890 - accuracy: 0.8879 - val_loss: 0.3502 - val_accuracy: 0.8466
Epoch 11/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.2658 - accuracy: 0.9000 - val_loss: 0.3353 - val_accuracy: 0.8539
Epoch 12/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.2462 - accuracy: 0.9089 - val_loss: 0.3265 - val_accuracy: 0.8594
Epoch 13/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.2300 - accuracy: 0.9163 - val_loss: 0.3196 - val_accuracy: 0.8638
Epoch 14/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.2146 - accuracy: 0.9235 - val_loss: 0.3143 - val_accuracy: 0.8657
Epoch 15/40
15000/15000 [==============================] - 4s 270us/sample - loss: 0.1997 - accuracy: 0.9303 - val_loss: 0.3104 - val_accuracy: 0.8697
Epoch 16/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1866 - accuracy: 0.9359 - val_loss: 0.3097 - val_accuracy: 0.8686
Epoch 17/40
15000/15000 [==============================] - 4s 269us/sample - loss: 0.1750 - accuracy: 0.9407 - val_loss: 0.3060 - val_accuracy: 0.8732
Epoch 18/40
15000/15000 [==============================] - 4s 277us/sample - loss: 0.1674 - accuracy: 0.9443 - val_loss: 0.3050 - val_accuracy: 0.8744
Epoch 19/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1537 - accuracy: 0.9492 - val_loss: 0.3069 - val_accuracy: 0.8718
Epoch 20/40
15000/15000 [==============================] - 4s 269us/sample - loss: 0.1445 - accuracy: 0.9533 - val_loss: 0.3108 - val_accuracy: 0.8711
Epoch 21/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.1355 - accuracy: 0.9573 - val_loss: 0.3066 - val_accuracy: 0.8757
Epoch 22/40
15000/15000 [==============================] - 4s 262us/sample - loss: 0.1262 - accuracy: 0.9611 - val_loss: 0.3087 - val_accuracy: 0.8759
Epoch 23/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1185 - accuracy: 0.9655 - val_loss: 0.3112 - val_accuracy: 0.8765
Epoch 24/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.1108 - accuracy: 0.9688 - val_loss: 0.3140 - val_accuracy: 0.8758
Epoch 25/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1038 - accuracy: 0.9731 - val_loss: 0.3174 - val_accuracy: 0.8748
Epoch 26/40
15000/15000 [==============================] - 4s 264us/sample - loss: 0.0970 - accuracy: 0.9755 - val_loss: 0.3215 - val_accuracy: 0.8751
Epoch 27/40
15000/15000 [==============================] - 4s 263us/sample - loss: 0.0912 - accuracy: 0.9774 - val_loss: 0.3264 - val_accuracy: 0.8746
Epoch 28/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0852 - accuracy: 0.9799 - val_loss: 0.3294 - val_accuracy: 0.8751
Epoch 29/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0799 - accuracy: 0.9820 - val_loss: 0.3340 - val_accuracy: 0.8756
Epoch 30/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0748 - accuracy: 0.9835 - val_loss: 0.3393 - val_accuracy: 0.8758
Epoch 31/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0696 - accuracy: 0.9856 - val_loss: 0.3466 - val_accuracy: 0.8722
Epoch 32/40
15000/15000 [==============================] - 4s 266us/sample - loss: 0.0653 - accuracy: 0.9871 - val_loss: 0.3516 - val_accuracy: 0.8731
Epoch 33/40
15000/15000 [==============================] - 4s 268us/sample - loss: 0.0608 - accuracy: 0.9891 - val_loss: 0.3582 - val_accuracy: 0.8706
Epoch 34/40
15000/15000 [==============================] - 4s 263us/sample - loss: 0.0567 - accuracy: 0.9905 - val_loss: 0.3623 - val_accuracy: 0.8731
Epoch 35/40
15000/15000 [==============================] - 4s 267us/sample - loss: 0.0531 - accuracy: 0.9912 - val_loss: 0.3709 - val_accuracy: 0.8706
Epoch 36/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.0497 - accuracy: 0.9921 - val_loss: 0.3770 - val_accuracy: 0.8709
Epoch 37/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.0460 - accuracy: 0.9933 - val_loss: 0.3818 - val_accuracy: 0.8715
Epoch 38/40
15000/15000 [==============================] - 4s 260us/sample - loss: 0.0434 - accuracy: 0.9945 - val_loss: 0.3878 - val_accuracy: 0.8722
Epoch 39/40
15000/15000 [==============================] - 4s 259us/sample - loss: 0.0402 - accuracy: 0.9955 - val_loss: 0.3982 - val_accuracy: 0.8694
Epoch 40/40
15000/15000 [==============================] - 4s 261us/sample - loss: 0.0372 - accuracy: 0.9959 - val_loss: 0.4039 - val_accuracy: 0.8691
###Markdown
Evaluate the neural network.
###Code
results = model.evaluate(test_examples, test_labels)  # use the numpy arrays created above, not the raw tfds split
print(results)
history_dict = history.history
history_dict.keys()
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
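###Markdown
Given the overfitting visible in the curves above, a common refinement (not part of the original notebook) is to stop training once validation loss stops improving. A minimal sketch using Keras's built-in EarlyStopping callback; in practice you would rebuild and recompile the model before refitting rather than continue from the already-trained weights:
###Code
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',          # watch validation loss
    patience=3,                  # tolerate 3 epochs without improvement
    restore_best_weights=True)   # roll back to the best epoch's weights

history = model.fit(partial_x_train, partial_y_train,
                    epochs=40, batch_size=512,
                    validation_data=(x_val, y_val),
                    callbacks=[early_stop], verbose=1)
###Output
_____no_output_____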
###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Regularization: L1, L2 and Dropout*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb) Part 9.4: Transfer Learning for Languages and KerasTransfer learning is commonly used with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, for now we will look how a NLP network can be loaded into Keras for transfer learning. The following three sources were helpful for the creation of this section.* Cer, D., Yang, Y., Kong, S. Y., Hua, N., Limtiaco, N., John, R. S., ... & Sung, Y. H. (2018). [Universal sentence encoder](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175.* [Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)* [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)These examples make use of TensorFlow Hub, which allows pretrained models to easily be loaded into TensorFlow. To install TensorHub use the following command.
###Code
!pip install tensorflow_hub
###Output
Requirement already satisfied: tensorflow_hub in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (0.5.0)
Requirement already satisfied: numpy>=1.12.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.16.4)
Requirement already satisfied: six>=1.10.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.12.0)
Requirement already satisfied: protobuf>=3.4.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (3.9.1)
Requirement already satisfied: setuptools in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.4.0->tensorflow_hub) (41.0.1)
###Markdown
It is also necessary to install TensorFlow Datasets. This can be done with the following command.
###Code
!pip install tensorflow_datasets
###Output
Requirement already satisfied: tensorflow_datasets in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (1.1.0)
Requirement already satisfied: tensorflow-metadata in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.14.0)
Requirement already satisfied: requests>=2.19.0 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.22.0)
Requirement already satisfied: promise in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.2.1)
Requirement already satisfied: dill in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.3.0)
Requirement already satisfied: protobuf>=3.6.1 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (3.9.1)
Requirement already satisfied: tqdm in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (4.33.0)
Requirement already satisfied: six in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.12.0)
Requirement already satisfied: wrapt in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.11.2)
Requirement already satisfied: numpy in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.16.4)
Requirement already satisfied: psutil in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (5.6.3)
Requirement already satisfied: termcolor in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.1.0)
Requirement already satisfied: future in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.17.1)
Requirement already satisfied: attrs in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (19.1.0)
Requirement already satisfied: absl-py in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.7.1)
Requirement already satisfied: googleapis-common-protos in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow-metadata->tensorflow_datasets) (1.6.0)
Requirement already satisfied: idna<2.9,>=2.5 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2.8)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2019.6.16)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (1.25.3)
Requirement already satisfied: setuptools in c:\users\jheaton\appdata\local\continuum\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.6.1->tensorflow_datasets) (41.0.1)
###Markdown
https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH Load the Internet Movie DataBase (IMDB) reviews data set.
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
train_data, test_data = tfds.load(name="imdb_reviews", split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
WARNING: Logging before flag parsing goes to stderr.
W0819 16:45:54.504038 19696 dataset_builder.py:439] Warning: Setting shuffle_files=True because split=TRAIN and shuffle_files=None. This behavior will be deprecated on 2019-08-06, at which point shuffle_files=False will be the default for all splits.
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This was trained by Google on gnews data and can convert RAW text into vectors.
###Code
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
###Markdown
Consider the following 3 movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each to 20-number vectors.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
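###Markdown
Since these embeddings are just vectors, reviews can be compared numerically. The cell below is a small illustrative sketch (it assumes TensorFlow 2.x eager execution, so `.numpy()` is available) that computes the cosine similarity between the first two review embeddings.
###Code
import numpy as np
# Convert the 20-dim embeddings of the first 3 reviews to a numpy array.
emb = hub_layer(train_examples[:3]).numpy()
# Cosine similarity between the first two review embeddings.
cos = np.dot(emb[0], emb[1]) / (np.linalg.norm(emb[0]) * np.linalg.norm(emb[1]))
print(cos)
###Output
_____no_output_____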
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
keras_layer (KerasLayer) (None, 20) 400020
_________________________________________________________________
dense (Dense) (None, 16) 336
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________
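###Markdown
A quick sketch of where the parameter counts above come from (the 20,000-token vocabulary size is an assumption about this particular TF-Hub module, not something the summary reports directly):
###Code
# Embedding table: (vocabulary + 1 out-of-vocabulary bucket) tokens x 20 dims each.
embedding_params = (20000 + 1) * 20   # = 400,020
# Dense(16) on a 20-dim input: 20*16 weights + 16 biases.
dense_params = 20 * 16 + 16           # = 336
# Dense(1) on a 16-dim input: 16 weights + 1 bias.
output_params = 16 * 1 + 1            # = 17
print(embedding_params + dense_params + output_params)  # 400,373, matching the summary
###Output
_____no_output_____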
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
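###Markdown
To see what `binary_crossentropy` measures, here is an illustrative hand computation for a single prediction (a made-up example, not data from this notebook):
###Code
import numpy as np
# Binary cross-entropy for one example: -(y*log(p) + (1-y)*log(1-p)).
y_true, y_pred = 1.0, 0.8
bce = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
print(bce)  # ~0.223; the loss shrinks as y_pred approaches y_true
###Output
_____no_output_____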
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
W0819 16:45:57.484114 19696 training_utils.py:1210] When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.
###Markdown
Evaluate the neural network.
###Code
results = model.evaluate(test_examples, test_labels)
print(results)
history_dict = history.history
history_dict.keys()
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
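###Markdown
The validation curves typically bottom out well before epoch 40 while training loss keeps falling, a sign of overfitting. One common remedy is early stopping; the cell below is a sketch (the callback is standard Keras, but the commented `fit` call is illustrative, not part of this notebook's run):
###Code
from tensorflow import keras
# Stop when val_loss has not improved for 3 epochs and keep the best weights.
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                           restore_best_weights=True)
# history = model.fit(partial_x_train, partial_y_train, epochs=40,
#                     batch_size=512, validation_data=(x_val, y_val),
#                     callbacks=[early_stop])
###Output
_____no_output_____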
###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Transfer Learning*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=xyymDGReKdY&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=26) [[Notebook]](t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=CEFcwpBneFo&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=27) [[Notebook]](t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=JPqwyuK7bPg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=28) [[Notebook]](t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_09_5_transfer_feature_eng.ipynb) Part 9.4: Transfer Learning for Languages and KerasTransfer learning is commonly used with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, for now we will look at how an NLP network can be loaded into Keras for transfer learning. The following three sources were helpful for the creation of this section.* Cer, D., Yang, Y., Kong, S. Y., Hua, N., Limtiaco, N., John, R. S., ... & Sung, Y. H. (2018). [Universal sentence encoder](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175.* [Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)* [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)These examples make use of TensorFlow Hub, which allows pretrained models to easily be loaded into TensorFlow. To install TensorHub use the following command.
###Code
!pip install tensorflow_hub
###Output
_____no_output_____
###Markdown
It is also necessary to install TensorFlow Datasets. This can be done with the following command.
###Code
!pip install tensorflow_datasets
###Output
_____no_output_____
###Markdown
https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH Load the Internet Movie DataBase (IMDB) reviews data set.
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
train_data, test_data = tfds.load(name="imdb_reviews", split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
_____no_output_____
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This was trained by Google on gnews data and can convert RAW text into vectors.
###Code
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
###Markdown
Consider the following 3 movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each to 20-number vectors.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
_____no_output_____
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
_____no_output_____
###Markdown
Evaluate the neural network.
###Code
results = model.evaluate(test_examples, test_labels)
print(results)
history_dict = history.history
history_dict.keys()
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
T81-558: Applications of Deep Neural Networks**Module 9: Transfer Learning*** Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). Module 9 Material* Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=WLlP6S-Z8Xs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_1_keras_transfer.ipynb)* Part 9.2: Popular Pretrained Neural Networks for Keras [[Video]](https://www.youtube.com/watch?v=ctVA1_46YEE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_2_popular_transfer.ipynb)* Part 9.3: Transfer Learning for Computer Vision and Keras [[Video]](https://www.youtube.com/watch?v=61vMUm_XBMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_3_transfer_cv.ipynb)* **Part 9.4: Transfer Learning for Languages and Keras** [[Video]](https://www.youtube.com/watch?v=ajmAAg9FxXA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_4_transfer_nlp.ipynb)* Part 9.5: Transfer Learning for Keras Feature Engineering [[Video]](https://www.youtube.com/watch?v=Dttxsm8zpL8&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_5_transfer_feature_eng.ipynb) Google CoLab InstructionsThe following code ensures that Google CoLab is running the correct version of TensorFlow.
###Code
try:
%tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
###Output
Note: not using Google CoLab
###Markdown
Part 9.4: Transfer Learning for Languages and KerasYou will commonly use transfer learning in conjunction with Natural Language Processing (NLP). This course has an entire module that covers NLP. However, we will look at how you can load a network into Keras for NLP via transfer learning. The following three sources were helpful for the creation of this section.* Universal sentence encoder [[Cite:cer2018universal]](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175.* Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings [[Cite:howard2018universal]](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)* [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)These examples make use of TensorFlow Hub, which allows pretrained models to be loaded into TensorFlow easily. To install TensorHub use the following command.
###Code
!pip install tensorflow_hub
###Output
Collecting tensorflow_hub
Downloading tensorflow_hub-0.9.0-py2.py3-none-any.whl (103 kB)
Requirement already satisfied: six>=1.12.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.15.0)
Requirement already satisfied: protobuf>=3.8.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (3.12.3)
Requirement already satisfied: numpy>=1.12.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_hub) (1.18.5)
Requirement already satisfied: setuptools in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.8.0->tensorflow_hub) (49.2.0.post20200714)
Installing collected packages: tensorflow-hub
Successfully installed tensorflow-hub-0.9.0
###Markdown
It is also necessary to install TensorFlow Datasets, which you can install with the following command.
###Code
!pip install tensorflow_datasets
###Output
Collecting tensorflow_datasets
Downloading tensorflow_datasets-3.2.1-py3-none-any.whl (3.4 MB)
Requirement already satisfied: requests>=2.19.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (2.24.0)
Requirement already satisfied: wrapt in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.12.1)
Collecting dill
Downloading dill-0.3.2.zip (177 kB)
Requirement already satisfied: attrs>=18.1.0 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (19.3.0)
Requirement already satisfied: absl-py in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.9.0)
Requirement already satisfied: protobuf>=3.6.1 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (3.12.3)
Collecting promise
Downloading promise-2.3.tar.gz (19 kB)
Requirement already satisfied: numpy in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.18.5)
Collecting tensorflow-metadata
Downloading tensorflow_metadata-0.23.0-py3-none-any.whl (43 kB)
Requirement already satisfied: future in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (0.18.2)
Requirement already satisfied: six in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.15.0)
Requirement already satisfied: tqdm in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (4.47.0)
Requirement already satisfied: termcolor in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from tensorflow_datasets) (1.1.0)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2020.6.20)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from requests>=2.19.0->tensorflow_datasets) (2.10)
Requirement already satisfied: setuptools in c:\users\jeffh\miniconda3\envs\tensorflow\lib\site-packages (from protobuf>=3.6.1->tensorflow_datasets) (49.2.0.post20200714)
Collecting googleapis-common-protos
Downloading googleapis_common_protos-1.52.0-py2.py3-none-any.whl (100 kB)
Building wheels for collected packages: dill, promise
Building wheel for dill (setup.py): started
Building wheel for dill (setup.py): finished with status 'done'
Created wheel for dill: filename=dill-0.3.2-py3-none-any.whl size=78977 sha256=c9ee55462820f4e66c44c76f46eb499453725a7339436b18eb0947153d0d2592
Stored in directory: c:\users\jeffh\appdata\local\pip\cache\wheels\72\6b\d5\5548aa1b73b8c3d176ea13f9f92066b02e82141549d90e2100
Building wheel for promise (setup.py): started
Building wheel for promise (setup.py): finished with status 'done'
###Markdown
Load the Internet Movie DataBase (IMDB) reviews data set. This example is based on a TensorFlow example that you can [find here](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH).
###Code
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
train_data, test_data = tfds.load(name="imdb_reviews",
split=["train", "test"],
batch_size=-1, as_supervised=True)
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
###Output
Downloading and preparing dataset imdb_reviews/plain_text/1.0.0 (download: Unknown size, generated: Unknown size, total: Unknown size) to C:\Users\jeffh\tensorflow_datasets\imdb_reviews\plain_text\1.0.0...
###Markdown
Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). This network was trained by Google on GNEWS data and can convert RAW text into vectors.
###Code
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
###Output
_____no_output_____
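###Markdown
Because `trainable=True`, the pretrained embedding weights will be fine-tuned along with the classifier. To use the embeddings as fixed features instead, the same layer can be created frozen; a sketch (`frozen_layer` and `handle` are illustrative names, not used elsewhere in this notebook):
###Code
import tensorflow as tf
import tensorflow_hub as hub
handle = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
# trainable=False keeps the pretrained embedding parameters fixed during training.
frozen_layer = hub.KerasLayer(handle, output_shape=[20], input_shape=[],
                              dtype=tf.string, trainable=False)
###Output
_____no_output_____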
###Markdown
Consider the following three movie reviews.
###Code
train_examples[:3]
###Output
_____no_output_____
###Markdown
The embedding layer can convert each to 20-number vectors.
###Code
hub_layer(train_examples[:3])
###Output
_____no_output_____
###Markdown
We add additional layers to attempt to classify the movie reviews as either positive or negative.
###Code
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
keras_layer (KerasLayer) (None, 20) 400020
_________________________________________________________________
dense (Dense) (None, 16) 336
_________________________________________________________________
dense_1 (Dense) (None, 1) 17
=================================================================
Total params: 400,373
Trainable params: 400,373
Non-trainable params: 0
_________________________________________________________________
###Markdown
Compile the neural network.
###Code
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Split and train the neural network.
###Code
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
###Output
Train on 15000 samples, validate on 10000 samples
Epoch 1/40
15000/15000 [==============================] - 3s 189us/sample - loss: 0.6388 - accuracy: 0.6433 - val_loss: 0.5910 - val_accuracy: 0.6937
Epoch 2/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.5626 - accuracy: 0.7191 - val_loss: 0.5495 - val_accuracy: 0.7295
Epoch 3/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.5173 - accuracy: 0.7573 - val_loss: 0.5138 - val_accuracy: 0.7585
Epoch 4/40
15000/15000 [==============================] - 2s 145us/sample - loss: 0.4774 - accuracy: 0.7839 - val_loss: 0.4809 - val_accuracy: 0.7832
Epoch 5/40
15000/15000 [==============================] - 2s 146us/sample - loss: 0.4393 - accuracy: 0.8106 - val_loss: 0.4546 - val_accuracy: 0.7973
Epoch 6/40
15000/15000 [==============================] - 2s 146us/sample - loss: 0.4038 - accuracy: 0.8305 - val_loss: 0.4239 - val_accuracy: 0.8155
Epoch 7/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.3702 - accuracy: 0.8500 - val_loss: 0.3997 - val_accuracy: 0.8288
Epoch 8/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.3392 - accuracy: 0.8642 - val_loss: 0.3811 - val_accuracy: 0.8371
Epoch 9/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.3133 - accuracy: 0.8787 - val_loss: 0.3601 - val_accuracy: 0.8462
Epoch 10/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.2876 - accuracy: 0.8895 - val_loss: 0.3457 - val_accuracy: 0.8534
Epoch 11/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.2653 - accuracy: 0.9009 - val_loss: 0.3341 - val_accuracy: 0.8602
Epoch 12/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.2454 - accuracy: 0.9103 - val_loss: 0.3267 - val_accuracy: 0.8635
Epoch 13/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.2280 - accuracy: 0.9178 - val_loss: 0.3174 - val_accuracy: 0.8676
Epoch 14/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.2117 - accuracy: 0.9259 - val_loss: 0.3119 - val_accuracy: 0.8701
Epoch 15/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.1978 - accuracy: 0.9321 - val_loss: 0.3074 - val_accuracy: 0.8715
Epoch 16/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1847 - accuracy: 0.9371 - val_loss: 0.3059 - val_accuracy: 0.8714
Epoch 17/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1725 - accuracy: 0.9426 - val_loss: 0.3024 - val_accuracy: 0.8739
Epoch 18/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.1616 - accuracy: 0.9471 - val_loss: 0.3079 - val_accuracy: 0.8725
Epoch 19/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.1508 - accuracy: 0.9519 - val_loss: 0.3022 - val_accuracy: 0.8747
Epoch 20/40
15000/15000 [==============================] - 2s 145us/sample - loss: 0.1406 - accuracy: 0.9567 - val_loss: 0.3026 - val_accuracy: 0.8745
Epoch 21/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.1317 - accuracy: 0.9593 - val_loss: 0.3043 - val_accuracy: 0.8760
Epoch 22/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1237 - accuracy: 0.9629 - val_loss: 0.3058 - val_accuracy: 0.8763
Epoch 23/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.1154 - accuracy: 0.9668 - val_loss: 0.3081 - val_accuracy: 0.8765
Epoch 24/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.1077 - accuracy: 0.9697 - val_loss: 0.3116 - val_accuracy: 0.8773
Epoch 25/40
15000/15000 [==============================] - 2s 140us/sample - loss: 0.1011 - accuracy: 0.9737 - val_loss: 0.3153 - val_accuracy: 0.8759
Epoch 26/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0945 - accuracy: 0.9761 - val_loss: 0.3179 - val_accuracy: 0.8762
Epoch 27/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.0886 - accuracy: 0.9787 - val_loss: 0.3224 - val_accuracy: 0.8757
Epoch 28/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0826 - accuracy: 0.9805 - val_loss: 0.3275 - val_accuracy: 0.8750
Epoch 29/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0768 - accuracy: 0.9828 - val_loss: 0.3322 - val_accuracy: 0.8744
Epoch 30/40
15000/15000 [==============================] - 2s 140us/sample - loss: 0.0718 - accuracy: 0.9851 - val_loss: 0.3376 - val_accuracy: 0.8743
Epoch 31/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0672 - accuracy: 0.9860 - val_loss: 0.3430 - val_accuracy: 0.8747
Epoch 32/40
15000/15000 [==============================] - 2s 140us/sample - loss: 0.0625 - accuracy: 0.9885 - val_loss: 0.3506 - val_accuracy: 0.8723
Epoch 33/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0583 - accuracy: 0.9889 - val_loss: 0.3546 - val_accuracy: 0.8734
Epoch 34/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0546 - accuracy: 0.9903 - val_loss: 0.3619 - val_accuracy: 0.8732
Epoch 35/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.0509 - accuracy: 0.9916 - val_loss: 0.3679 - val_accuracy: 0.8713
Epoch 36/40
15000/15000 [==============================] - 2s 142us/sample - loss: 0.0473 - accuracy: 0.9923 - val_loss: 0.3751 - val_accuracy: 0.8721
Epoch 37/40
15000/15000 [==============================] - 2s 141us/sample - loss: 0.0441 - accuracy: 0.9938 - val_loss: 0.3817 - val_accuracy: 0.8707
Epoch 38/40
15000/15000 [==============================] - 2s 143us/sample - loss: 0.0410 - accuracy: 0.9945 - val_loss: 0.3888 - val_accuracy: 0.8700
Epoch 39/40
15000/15000 [==============================] - 2s 144us/sample - loss: 0.0384 - accuracy: 0.9949 - val_loss: 0.3966 - val_accuracy: 0.8707
Epoch 40/40
15000/15000 [==============================] - 2s 145us/sample - loss: 0.0359 - accuracy: 0.9955 - val_loss: 0.4033 - val_accuracy: 0.8684
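###Markdown
Notice in the log above that `val_loss` reaches its minimum around epochs 17-19 and then climbs while training loss keeps shrinking: the network is overfitting. A small sketch to locate the best epoch programmatically:
###Code
import numpy as np
# history.history['val_loss'] holds one value per epoch.
best_epoch = int(np.argmin(history.history['val_loss'])) + 1
print('Best epoch by validation loss:', best_epoch)
###Output
_____no_output_____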
###Markdown
Evaluate the neural network.
###Code
results = model.evaluate(test_examples, test_labels)
print(results)
history_dict = history.history
history_dict.keys()
%matplotlib inline
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____ |
LABS/Lab1.ipynb | ###Markdown
Lab1: Introduction to coding using Python Student: Juan Vecino Group: Group B, Shift 2 11/09/2020 Exercise 1: One target, different possibilities
###Code
import numpy as np
import math as mt
value = 12
cube1= value**3
cube1
cube2=value*value*value
cube2
cube3 = int(mt.pow(value,3))
cube3
cube4 = np.power(value,3)
cube4
###Output
_____no_output_____
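###Markdown
All four approaches should produce the same value; a quick sanity check (12**3 = 1728):
###Code
# cube4 is a numpy integer, but == compares it with Python ints transparently.
assert cube1 == cube2 == cube3 == cube4 == 1728
print(cube1, cube2, cube3, cube4)
###Output
_____no_output_____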
###Markdown
Exercise 2: First contact with loops
###Code
import time
seconds= range(10)
wait=1
for i in seconds:
print(i)
time.sleep(wait)
###Output
range(0, 10)
0
1
2
3
4
5
6
7
8
9
###Markdown
Explanation: This code first imports the time library, which gives us access to time-related utilities (seconds/days/months/years). Next, the variable `seconds` is defined as the range of values [0, 10) (10 itself is not included). The variable `wait` is also defined and set equal to 1. Then a loop runs that, for each value in `seconds`, prints the number, waits 1 second, and prints the next number until the whole range has been printed. Exercise 3: Calculate a polynomial function
###Code
X = np.linspace(-5,5,100)
X
for i in X:
Y = mt.sqrt(mt.pow(i,4)+mt.pow(i,3)+mt.pow(i,2)+mt.pow(i,1)+1)
print(Y)
###Output
22.825424421026653
21.87532290457873
20.945612887857454
20.036294550667755
19.147368199410387
18.278834295152603
17.4306934876723
16.602946656836206
15.795594963012745
15.0086399086545
14.242083413741504
13.495927908493861
12.770176447690732
12.06483285214179
11.3799018844402
10.715389468210393
10.071302962824657
9.447651509241538
8.844446467552185
8.261701973477635
7.699435650099772
7.157669523461461
6.63643120765268
6.13575544847497
5.655686147383436
5.196279032853064
4.757605209740746
4.339755905529941
3.9428488543230285
3.5670369248506737
3.2125198147565444
2.8795598939320786
2.5685035377740415
2.2798093920759097
2.0140845401974135
1.772127511046062
1.5549713609381646
1.36390687578687
1.2004406851919607
1.066107793116274
0.9620429579695005
0.8883014636635651
0.843173742402832
0.8229885367698734
0.8227262756319079
0.8371619356791724
0.8619085184078479
0.893983754200869
0.9319283217689761
0.9756656136862026
1.02625101377675
1.0855769518888605
1.1560553728431853
1.2402950542994704
1.3408056219550997
1.45976887343407
1.598907148912167
1.7594518438671236
1.9421890039000347
2.1475468168430547
2.375693183572022
2.626623039595313
2.9002267838606417
3.1963390881907787
3.5147712009125014
3.855330910167036
4.217833976911625
4.602110008141612
5.0080048850870105
5.43538116447439
5.8841173627292696
6.3541066856630755
6.845255538927004
7.3574820105340955
7.890714427965367
8.444890039054087
9.019953834783703
9.615857514780005
10.232558587198112
10.870019590512063
11.5282074233266
12.207092768482715
12.906649598667864
13.626854752026357
14.3676875676469
15.129129572146798
15.911164209808382
16.713776609827022
17.536953385193396
18.38068245856429
19.24495291118653
20.12975485154122
21.03507930088605
21.960918093303995
22.907263788228644
23.874109593722903
24.861449299044025
25.86927721524519
26.8975881227468
27.94637722496424
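###Markdown
The same computation can be done without an explicit loop; a vectorized sketch that is equivalent to the loop above (`Y_vec` is an illustrative name):
###Code
# numpy applies the arithmetic element-wise to the whole array at once.
Y_vec = np.sqrt(X**4 + X**3 + X**2 + X + 1)
print(Y_vec[:5])
###Output
_____no_output_____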
###Markdown
ExplanationHere a loop takes each value of `X` in turn and evaluates the function on it, printing the result for each of the 100 values of `X` (between -5 and 5). Exercise 4: Plot a polynomial functionThe function to be plotted is:$$\sqrt{x^4+x^3+x^2+x+1}$$
###Code
import matplotlib.pyplot as plt
y = []
for i in X:
Y = np.sqrt(np.power(i,4)+np.power(i,3)+np.power(i,2)+np.power(i,1)+1)
y.append(Y)
plt.plot(X,y, label= r'$g(x) = \sqrt{x^4+x^3+x^2+x+1}$')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.title('Function')
plt.legend()
###Output
_____no_output_____
###Markdown
Exercise 5: Update existing values ExplanationWhat happened before is that there were very few points, so the straight segments drawn between them changed slope abruptly, which made the plot of the function quite imprecise. After adding 90 more points, the plot of the function is much smoother and the change of slope between points is no longer noticeable, which makes the graph much more readable. Exercise 6: Plot two functionsThe functions to be plotted are:$$g(x) = \sqrt{x^4+x^3+x^2+x+1}$$$$f(x) = \frac{\log{x^4 + x^3 + x^2 + x + 1}}{\log(x^4)}$$
###Code
# Plot 1
plt.plot(X,y, 'r', label = r'$g(x) = \sqrt{x^4+x^3+x^2+x+1}$')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.title('Both functions')
# Plot 2
y2 = []
for i in X:
funcion2= (mt.log((mt.pow(i ,4 )+mt.pow(i,3)+mt.pow(i,2)+mt.pow(i,1)+1), 10))/(mt.log(mt.pow(i,4), 10))
y2.append(funcion2)
plt.plot(X,y2, 'g', label= r'$f(x) = \frac{\log{x^4 + x^3 + x^2 + x + 1}}{\log(x^4)}$')
plt.legend()
###Output
_____no_output_____ |
Week2/.ipynb_checkpoints/Week2Tutorial1-checkpoint.ipynb | ###Markdown
Week 2: Linear Algebra II Tutorial 1 [insert your name]**Important reminders**: Before starting, click "File -> Save a copy in Drive". Produce a pdf for submission by "File -> Print" and then choose "Save to PDF".To complete this tutorial, you should have watched Videos 2.1 through 2.6. **Credits:**The videos you watched for this week were from 3Blue1Brown. Some elements of this problem set are from or inspired by https://openedx.seas.gwu.edu/courses/course-v1:GW+EngComp4+2019/about. In particular, we are using their `plot_linear_transformation` and `plot_linear_transformations` functions, and the demonstration of the additional transformation of a matrix inverse (end of Exercise 2)
###Code
# Imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.linalg
# Plotting parameters
matplotlib.rcParams.update({'font.size': 22})
# @title Plotting functions
import numpy
from numpy.linalg import inv, eig
from math import ceil
from matplotlib import pyplot, ticker, get_backend, rc
from mpl_toolkits.mplot3d import Axes3D
from itertools import cycle
_int_backends = ['GTK3Agg', 'GTK3Cairo', 'MacOSX', 'nbAgg',
'Qt4Agg', 'Qt4Cairo', 'Qt5Agg', 'Qt5Cairo',
'TkAgg', 'TkCairo', 'WebAgg', 'WX', 'WXAgg', 'WXCairo']
_backend = get_backend() # get current backend name
# shrink figsize and fontsize when using %matplotlib notebook
if _backend in _int_backends:
fontsize = 4
fig_scale = 0.75
else:
fontsize = 5
fig_scale = 1
grey = '#808080'
gold = '#cab18c' # x-axis grid
lightblue = '#0096d6' # y-axis grid
green = '#008367' # x-axis basis vector
red = '#E31937' # y-axis basis vector
darkblue = '#004065'
pink, yellow, orange, purple, brown = '#ef7b9d', '#fbd349', '#ffa500', '#a35cff', '#731d1d'
quiver_params = {'angles': 'xy',
'scale_units': 'xy',
'scale': 1,
'width': 0.012}
grid_params = {'linewidth': 0.5,
'alpha': 0.8}
def set_rc(func):
def wrapper(*args, **kwargs):
rc('font', family='serif', size=fontsize)
rc('figure', dpi=200)
rc('axes', axisbelow=True, titlesize=5)
rc('lines', linewidth=1)
func(*args, **kwargs)
return wrapper
@set_rc
def plot_vector(vectors, tails=None):
''' Draw 2d vectors based on the values of the vectors and the position of their tails.
Parameters
----------
vectors : list.
List of 2-element array-like structures, each represents a 2d vector.
tails : list, optional.
List of 2-element array-like structures, each represents the coordinates of the tail
of the corresponding vector in vectors. If None (default), all tails are set at the
origin (0,0). If len(tails) is 1, all tails are set at the same position. Otherwise,
vectors and tails must have the same length.
Examples
--------
>>> v = [(1, 3), (3, 3), (4, 6)]
>>> plot_vector(v) # draw 3 vectors with their tails at origin
>>> t = [numpy.array((2, 2))]
>>> plot_vector(v, t) # draw 3 vectors with their tails at (2,2)
>>> t = [[3, 2], [-1, -2], [3, 5]]
>>> plot_vector(v, t) # draw 3 vectors with 3 different tails
'''
vectors = numpy.array(vectors)
assert vectors.shape[1] == 2, "Each vector should have 2 elements."
if tails is not None:
tails = numpy.array(tails)
assert tails.shape[1] == 2, "Each tail should have 2 elements."
else:
tails = numpy.zeros_like(vectors)
# tile vectors or tails array if needed
nvectors = vectors.shape[0]
ntails = tails.shape[0]
if nvectors == 1 and ntails > 1:
vectors = numpy.tile(vectors, (ntails, 1))
elif ntails == 1 and nvectors > 1:
tails = numpy.tile(tails, (nvectors, 1))
else:
assert tails.shape == vectors.shape, "vectors and tail must have a same shape"
# calculate xlimit & ylimit
heads = tails + vectors
limit = numpy.max(numpy.abs(numpy.hstack((tails, heads))))
limit = numpy.ceil(limit * 1.2) # add some margins
figsize = numpy.array([2,2]) * fig_scale
figure, axis = pyplot.subplots(figsize=figsize)
axis.quiver(tails[:,0], tails[:,1], vectors[:,0], vectors[:,1], color=darkblue,
angles='xy', scale_units='xy', scale=1)
axis.set_xlim([-limit, limit])
axis.set_ylim([-limit, limit])
axis.set_aspect('equal')
# if xticks and yticks of grid do not match, choose the finer one
xticks = axis.get_xticks()
yticks = axis.get_yticks()
dx = xticks[1] - xticks[0]
dy = yticks[1] - yticks[0]
base = max(int(min(dx, dy)), 1) # grid interval is always an integer
loc = ticker.MultipleLocator(base=base)
axis.xaxis.set_major_locator(loc)
axis.yaxis.set_major_locator(loc)
axis.grid(True, **grid_params)
# show x-y axis in the center, hide frames
axis.spines['left'].set_position('center')
axis.spines['bottom'].set_position('center')
axis.spines['right'].set_color('none')
axis.spines['top'].set_color('none')
@set_rc
def plot_transformation_helper(axis, matrix, *vectors, unit_vector=True, unit_circle=False, title=None):
""" A helper function to plot the linear transformation defined by a 2x2 matrix.
Parameters
----------
axis : class matplotlib.axes.Axes.
The axes to plot on.
matrix : class numpy.ndarray.
The 2x2 matrix to visualize.
*vectors : class numpy.ndarray.
The vector(s) to plot along with the linear transformation. Each array denotes a vector's
coordinates before the transformation and must have a shape of (2,). Accept any number of vectors.
unit_vector : bool, optional.
Whether to plot unit vectors of the standard basis, default to True.
unit_circle: bool, optional.
Whether to plot unit circle, default to False.
title: str, optional.
Title of the plot.
"""
assert matrix.shape == (2,2), "the input matrix must have a shape of (2,2)"
grid_range = 20
x = numpy.arange(-grid_range, grid_range+1)
X_, Y_ = numpy.meshgrid(x,x)
I = matrix[:,0]
J = matrix[:,1]
X = I[0]*X_ + J[0]*Y_
Y = I[1]*X_ + J[1]*Y_
origin = numpy.zeros(1)
# draw grid lines
for i in range(x.size):
axis.plot(X[i,:], Y[i,:], c=gold, **grid_params)
axis.plot(X[:,i], Y[:,i], c=lightblue, **grid_params)
# draw (transformed) unit vectors
if unit_vector:
axis.quiver(origin, origin, [I[0]], [I[1]], color=green, **quiver_params)
axis.quiver(origin, origin, [J[0]], [J[1]], color=red, **quiver_params)
# draw optional vectors
color_cycle = cycle([pink, darkblue, orange, purple, brown])
if vectors:
for vector in vectors:
color = next(color_cycle)
vector_ = matrix @ vector.reshape(-1,1)
axis.quiver(origin, origin, [vector_[0]], [vector_[1]], color=color, **quiver_params)
# draw optional unit circle
if unit_circle:
alpha = numpy.linspace(0, 2*numpy.pi, 41)
circle = numpy.vstack((numpy.cos(alpha), numpy.sin(alpha)))
circle_trans = matrix @ circle
axis.plot(circle_trans[0], circle_trans[1], color=red, lw=0.8)
# hide frames, set xlimit & ylimit, set title
limit = 4
axis.spines['left'].set_position('center')
axis.spines['bottom'].set_position('center')
axis.spines['left'].set_linewidth(0.3)
axis.spines['bottom'].set_linewidth(0.3)
axis.spines['right'].set_color('none')
axis.spines['top'].set_color('none')
axis.set_xlim([-limit, limit])
axis.set_ylim([-limit, limit])
if title is not None:
axis.set_title(title)
@set_rc
def plot_linear_transformation(matrix, *vectors, unit_vector=True, unit_circle=False):
""" Plot the linear transformation defined by a 2x2 matrix using the helper
function plot_transformation_helper(). It will create 2 subplots to visualize some
vectors before and after the transformation.
Parameters
----------
matrix : class numpy.ndarray.
The 2x2 matrix to visualize.
*vectors : class numpy.ndarray.
The vector(s) to plot along with the linear transformation. Each array denotes a vector's
coordinates before the transformation and must have a shape of (2,). Accept any number of vectors.
unit_vector : bool, optional.
Whether to plot unit vectors of the standard basis, default to True.
unit_circle: bool, optional.
Whether to plot unit circle, default to False.
"""
figsize = numpy.array([4,2]) * fig_scale
figure, (axis1, axis2) = pyplot.subplots(1, 2, figsize=figsize)
plot_transformation_helper(axis1, numpy.identity(2), *vectors, unit_vector=unit_vector, unit_circle=unit_circle, title='Before transformation')
plot_transformation_helper(axis2, matrix, *vectors, unit_vector=unit_vector, unit_circle=unit_circle, title='After transformation')
@set_rc
def plot_linear_transformations(*matrices, unit_vector=True, unit_circle=False):
""" Plot the linear transformation defined by a sequence of n 2x2 matrices using the helper
function plot_transformation_helper(). It will create n+1 subplots to visualize some
vectors before and after each transformation.
Parameters
----------
*matrices : class numpy.ndarray.
The 2x2 matrices to visualize. Accept any number of matrices.
unit_vector : bool, optional.
Whether to plot unit vectors of the standard basis, default to True.
unit_circle: bool, optional.
Whether to plot unit circle, default to False.
"""
nplots = len(matrices) + 1
nx = 2
ny = ceil(nplots/nx)
figsize = numpy.array([2*nx, 2*ny]) * fig_scale
figure, axes = pyplot.subplots(nx, ny, figsize=figsize)
for i in range(nplots): # fig_idx
if i == 0:
matrix_trans = numpy.identity(2)
title = 'Before transformation'
else:
matrix_trans = matrices[i-1] @ matrix_trans
if i == 1:
title = 'After {} transformation'.format(i)
else:
title = 'After {} transformations'.format(i)
plot_transformation_helper(axes[i//nx, i%nx], matrix_trans, unit_vector=unit_vector, unit_circle=unit_circle, title=title)
# hide axes of the extra subplot (only when nplots is an odd number)
if nx*ny > nplots:
axes[-1,-1].axis('off')
###Output
_____no_output_____
###Markdown
Key concept review & coding tips Linear transformations and matrices* A matrix is basically a table of numbers. * We can represent matrices with numpy arrays, which we create as a list of rows: \begin{bmatrix}4 & 1 & 2\\3 & 2 & 0\\\end{bmatrix} would be `np.array([[4, 1, 2], [3, 2, 0]])`* Linear transformations take in an input vector and output a transformed vector. Under a linear transformation, all grid lines remain parallel and evenly spaced and the origin remains fixed (it must preserve vector addition and scalar multiplication).* Matrices represent linear transformations: each column corresponds to where the corresponding standard basis vector ends up after the transformation.* We can think of the matrix vector multiplication $A\bar{x}=\bar{b}$ as a linear transformation where $A$ acts on $\bar{x}$ to produce $\bar{b}$. An alternate view is to think of it as solving a system of linear equations. * `np.linalg.solve` solves matrix vector equations like the above * As an example, solving $A\bar{x}=\bar{b}$ is equivalent to solving the system of linear equations: $$ \begin{align} 2x_1 + 3x_2 &= 6 \\ x_1 + 4x_2 &= 1 \end{align}$$$$\text{if } A = \begin{bmatrix}2 & 3 \\1 & 4\\\end{bmatrix}, \bar{b} =\begin{bmatrix}6 \\1\\\end{bmatrix}$$ Matrix multiplication* We can envision matrix multiplication as the composition of transformations. If C = AB, element $c_{ij}$ (the element of C in the ith row and jth column) equals the dot product of the ith row of A and the jth column of B. * There are several ways to do matrix multiplication in Python: we can use `np.dot(A, B)`, `np.matmul(A, B)` or use a special operator @ so `A @ B` Determinants* The determinant of a matrix (det A) is a scalar value that can be viewed as describing the area changes induced by the corresponding linear transformation. It is negative if the linear transformation reverses the orientation of the space. * `np.linalg.det(A)` computes the determinant Inverse matrices, column space, and null space* We can sometimes take the inverse of a matrix so that $A^{-1}A = I$ where $I$ is the identity matrix (all zeros except for ones on the diagonal). * We can use `np.linalg.inv(A)` to compute $A^{-1}$ when it exists * `np.eye(d)` gives us the identity matrix of dimension d* The column space of a matrix is the span of the columns of the matrix. This is equivalent to the range of the linear transformation where, in informal language, the range is everywhere that can be "gotten to" by the transformation. In other words, the range is the set of all vectors that the linear transformation maps to.* The rank of a matrix is the dimension of the column space. * `np.linalg.matrix_rank(A)` computes the rank* The null space of a matrix is the set of all vectors that land on the origin after the resulting transformation. In other words, it is the set of all solutions of $A\bar{x} = \bar{0}$. * You can use `scipy.linalg.null_space` to find a basis for the null space of a matrix.* If the matrix A is $m$ x $n$, the null space must be a subspace of $R^n$ and the column space must be a subspace of $R^m$. Exercise 1: Computation cornerFor each computation below, please calculate it 1) by-hand and 2) using code. Check that the answers match! For by-hand calculation, please show some work when possible.
For example, for matrix multiplication, write out the computation of each element in the resulting matrix so it looks something like this:$$A = \begin{bmatrix}5*2+4*1 & 3*5+1*2 \\0*1+1*2 & 3*2+4*5 \\\end{bmatrix} $$Note that these are completely made up numbers for demonstration purposes - the above numbers don't make sense for a matrix multiplication. A) Matrix multiplicationPlease compute C = AB where $$A = \begin{bmatrix}5 & 3 \\0 & 2 \\\end{bmatrix}, B = \begin{bmatrix}1 & 5 \\4 & 3 \\\end{bmatrix} $$
###Markdown
**Your math answer**$$C = \begin{bmatrix}5*1 + 3*4 & 5*5 + 3*3 \\0*1 + 2*4 & 0*5 + 2*3 \\\end{bmatrix} = \begin{bmatrix}17 & 34 \\8 & 6 \\\end{bmatrix}$$
###Code
# Your code answer
A = np.array([[5, 3],
[0, 2]])
B = np.array([[1, 5],
[4, 3]])
np.matmul(A,B)
###Output
_____no_output_____
###Markdown
B) Matrix multiplicationPlease compute Z = XY where $$X = \begin{bmatrix}3 & 2 & 1 \\1 & 2 & 7 \\\end{bmatrix}, Y = \begin{bmatrix}0 & 1 \\2 & 4 \\5 & 1 \\\end{bmatrix} $$ Before computing, figure out what the dimensions of Z will be (no need to explicitly answer this) **Your math answer**Z takes its row count from X and its column count from Y, so it will be 2 x 2:$$Z = \begin{bmatrix}3 * 0 + 2 * 2 + 1 * 5 & 3 * 1 + 2 * 4 + 1 * 1 \\1 * 0 + 2 * 2 + 7 * 5 & 1 * 1 + 2 * 4 + 7 * 1\\\end{bmatrix} = \begin{bmatrix}9 & 12 \\39 & 16\\\end{bmatrix}$$
###Code
A = np.array([[3, 2, 1],
[1, 2, 7]])
B = np.array([[0, 1],
[2, 4],
[5,1]])
np.matmul(A,B)
###Output
_____no_output_____
###Markdown
C) (Optional) Transpose**Please come back to this problem if you complete the rest of the tutorial during class time.**The **tranpose** of a matrix flips a matrix over its diagonal, changing the rows to columns and the columns to rows. We denote the transpose of matrix X with $X^T$. In numpy, we can get the transpose of an array X with `X.T`. First, write out the transpose of X from part B yourself and then produce it using code. **Your math answer**It's like computing Z = XY where $$X = \begin{bmatrix}9 & 12 \\39 & 16 \\\end{bmatrix}, Y = \begin{bmatrix}0 & 1 \\2 & 4 \\5 & 1 \\\end{bmatrix} $$
###Code
# Your code answer
A.T   # A holds X from part B, so this is X transposed
###Output
_____no_output_____
###Markdown
You could not compute $X^TY$ - why not? **Your text answer**The shapes don't match along dimension 1 Exercise 2: Thinking about transformations In the video *Linear transformations and matrices*, you learned that a matrix corresponding to a rotation by 90 degrees is $$X = \begin{bmatrix}0 & -1 \\1 & 0 \\\end{bmatrix}$$ You also saw that one matrix for which the transformation is horizontal shear is $$X = \begin{bmatrix}1 & 1 \\0 & 1 \\\end{bmatrix}$$In this exercise, we will think about some other types of transformations. We will use `plot_linear_transformation(X)` to see the grid before and after the transformation corresponding to matrix $X$.**Remember to think about where your basis vectors should end up! Then your matrix consists of the transformed basis vectors. Drawing out what you want to happen can help** A) Reflection across x2 axisCome up with a matrix $A$ for which the corresponding linear transformation is reflection through the $x_2$ axis (flipping across the $x_2$ axis). For example, $\bar{x} = \begin{bmatrix}1 \\5 \\\end{bmatrix}$ should become $\bar{b} = \begin{bmatrix}-1 \\5 \\\end{bmatrix}$ when multiplied with $A$.
###Code
A = np.array([[-1, 0],
              [0, 1]])   # flip the sign of x1: reflection across the x2 axis
plot_linear_transformation(A)
###Output
_____no_output_____
###Markdown
Would you expect the determinant of A to be positive or negative? Why? Would you expect the absolute value of the determinant to be greater than 1, less than 1, or equal to 1. Why? **Your text answer** B) Projection onto x1Come up with a matrix $A$ for which the corresponding linear transformation is projecting onto the $x_1$ axis. For example, $\bar{x} = \begin{bmatrix}1 \\5 \\\end{bmatrix}$ should become $\bar{b} = \begin{bmatrix}1 \\0 \\\end{bmatrix}$ when multiplied with $A$.
###Code
A = ...
plot_linear_transformation(A)
###Output
_____no_output_____
###Markdown
Would you expect the determinant of A to be positive or negative? Why? Would you expect the absolute value of the determinant to be greater than 1, less than 1, or equal to 1. Why? **Your text answer** C) (Optional) Figuring out the transformation from a matrix**Please come back to this problem if you complete the rest of the tutorial during class time.** $$A = \begin{bmatrix}3 & 1 \\0 & 3 \\\end{bmatrix}$$Try to answer the below questions without looking at the plot of the transformation, but feel free to do so if you get stucki) This matrix is a composition of two basic transformations, where possible basic transformations are reflection, contraction, expansion, horizontal shear, vertical shear, and projection. What are the two basic transformations it is a composition of? (Hint: does this matrix look at all like either of the two in the description below Exercise 2?)ii) Would you expect the determinant of A to be positive or negative? Why? Would you expect the absolute value of the determinant to be greater than 1, less than 1, or equal to 1. Why?iii) Rewrite A as a matrix multiplication of two matrices where each matrix corresponds to one of the basic transformations. **Your text answer**
###Code
A = np.array([[3, 1], [0, 3]])
#plot_linear_transformation(A)
###Output
_____no_output_____
###Markdown
Extra info: Matrix inverse transformationWe know that the inverse of a matrix essentially "undoes" the transformation of the matrix. Let's see this in action. We will plot the transformation of A then the additional transformation of $A^{-1}$ - the resulting plot should look like the original.
###Code
A = np.array([[1,2], [2,1]])
A_inv = np.linalg.inv(A)
plot_linear_transformations(A, A_inv)
###Output
_____no_output_____
###Markdown
Exercise 3: Encoding model matricesLet's say we have a population of 3 visual neurons that respond to 3 pixel images. Each neural response is a weighted sum of the pixel image: we used this type of model in Week 1 Part 1 Exercise 2. We will now allow the pixels to have negative values.We will look at two different populations of 3 neurons with different weights from the pixels: population f and population g. Below, we have the system of linear equations that dictates the neuron models for each population. $x_1$, $x_2$, and $x_3$ correspond to the pixel values. $r_{f1}$, $r_{f2}$, and $r_{f3}$ correspond to the responses of neurons 1, 2, and 3 in population f. $r_{g1}$, $r_{g2}$, and $r_{g3}$ correspond to the responses of neurons 1, 2, and 3 in population g. Population f:$$\begin{align}x_1 + 3x_2 + 4x_3 &= r_{f1} \\2x_1 + x_2 + 4x_3 &= r_{f2} \\x_1 + 5x_2 + 6x_3 &= r_{f3} \\\end{align}$$Population g:$$\begin{align}x_2 + x_3 &= r_{g1} \\6x_1 + 10x_2 &= r_{g2} \\3x_1 + 6x_2 + x_3 &= r_{g3} \\\end{align}$$ A) Rewriting linear systems of equations to matrix equationWe want to rewrite the above system of equations for each population in the matrix equation $F\bar{x} = \bar{r}_f$ where $\bar{x}$ is the image and $\bar{r}_f$ is the vector of neural responses in population f. What is F?We will do the same for population g: $G\bar{x} = \bar{r}_g$ where $\bar{r}_g$ is the vector of neural responses in population g. What is G? **Your math answer** We started with the linear system of equations view but, as always, we can think about this matrix equation in terms of a linear transformation. In particular matrices F and G are transforming vectors from a "pixel basis", where each element of a vector represents one pixel to a "neural basis" where each element represents the response of one neuron. B) Solving a matrix equationWe will now try to solve the matrix equation to find $\bar{x}$ for a given $\bar{r}_f$. What does this correspond to in the neuroscience setting (aka what is $\bar{x}$ here)? **Your text answer** Find $\bar{x}$ if $$\bar{r}_f = \begin{bmatrix}1 \\1 \\2 \\\end{bmatrix}$$We will use two different coding methods: you will first use `np.linalg.inv`, and then `np.linalg.solve`.
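Before filling in the exercise cells, here is a generic illustration of both coding methods on a toy system (the matrix is the one from the key-concept review above, not the exercise answer):
###Code
import numpy as np
# Toy system M x = b.
M = np.array([[2., 3.], [1., 4.]])
b = np.array([6., 1.])
x_inv = np.linalg.inv(M) @ b       # explicit inverse
x_solve = np.linalg.solve(M, b)    # preferred: no explicit inverse formed
print(x_inv, x_solve)              # both give the same x
###Output
_____no_output_____
###Markdown
Now apply the same pattern to population f below.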
###Code
# Define F
F = ...
# Define r_f
r_f = ...
# Find x using np.linalg.inv
x_using_inv = ...
# Find x using np.linalg.solve
x_using_solve = ...
# Check each method resulted in the same x
if np.all(np.isclose(x_using_inv, x_using_solve)):
print('Solutions match')
else:
print('PROBLEM: Solutions do not match!')
###Output
_____no_output_____
###Markdown
C) Solving another matrix equationTry to repeat the steps in B for population g where $$\bar{r}_g = \begin{bmatrix}1 \\1 \\2 \\\end{bmatrix}$$What problem do you run into?
###Code
# Define G
G = ...
# Define r_g
r_g = ...
# Find x using np.linalg.inv
x_using_inv = ...
# Find x using np.linalg.solve
x_using_solve = ...
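# Sketch (as comments): G comes from the population-g coefficients above:
# G = np.array([[0, 1, 1], [6, 10, 0], [3, 6, 1]])
# G is singular (row 3 equals row 1 plus half of row 2), so np.linalg.inv(G)
# and np.linalg.solve(G, r_g) both raise numpy.linalg.LinAlgError -- that is
# the problem you run into.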
# Check each method resulted in the same x
if np.all(np.isclose(x_using_inv, x_using_solve)):
print('Solutions match')
else:
print('PROBLEM: Solutions do not match!')
###Output
_____no_output_____
###Markdown
D) Calculate the rank of F/GFirst think: from the video *Inverse Matrices, column space, and null space*, we know that if an n x n matrix is invertible, the matrix is not "squishing" space: all of $R^n$ can be reached via the transformation. Based on your findings in parts B/C, what do you expect the ranks of F and G to be? (no need to explicitly answer, just discuss)Now compute the rank of each below and see if you were right.
###Code
rank_F = ...
rank_G = ...
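# Sketch: np.linalg.matrix_rank computes the rank, assuming F and G are
# defined as in parts B/C:
# rank_F = np.linalg.matrix_rank(F)  # expect 3: F is invertible
# rank_G = np.linalg.matrix_rank(G)  # expect 2: G squishes R^3 onto a plane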
print('The rank of F is '+str(rank_F))
print('The rank of G is '+str(rank_G))
###Output
_____no_output_____
###Markdown
E) Linearly independent or dependent columnsAre the columns of F linearly dependent or independent? How do you know? How about the columns of G? How do you know? (Hint: use the words rank and column space in your answer) **Your text answer** F) Finding the null spaceUse `scipy.linalg.null_space` to find the basis of the null spaces for F and G.
###Code
F_null_space = ...
G_null_space = ...
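# Sketch, assuming F and G are defined as in parts B/C:
# from scipy.linalg import null_space
# F_null_space = null_space(F)  # shape (3, 0): only the zero vector
# G_null_space = null_space(G)  # shape (3, 1): a line through the origin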
###Output
_____no_output_____ |
r_examples/r_byo_r_algo_hpo/tune_r_bring_your_own.ipynb | ###Markdown
Hyperparameter Tuning Your Own R Algorithm with Your Own Container in Amazon SageMaker_**Using Amazon SageMaker's Hyperparameter Tuning with a custom Docker container and R algorithm**_------ Contents1. [Background](Background)1. [Setup](Setup) 1. [Permissions](Permissions)1. [Code](Code) 1. [Publish](Publish)1. [Data](Data)1. [Tune](Tune)1. [HPO Analysis](HPO-Analysis)1. [Host](Host)1. [Predict](Predict)1. [(Optional) Clean-up]((Optional)-Clean-up)1. [Wrap-up](Wrap-up)--- BackgroundR is a popular open source statistical programming language, with a lengthy history in Data Science and Machine Learning. The breadth of algorithms available as R packages is impressive and fuels a diverse community of users. In this example, we'll combine one of those algorithms ([Multivariate Adaptive Regression Splines](https://en.wikipedia.org/wiki/Multivariate_adaptive_regression_splines)) with SageMaker's hyperparameter tuning capabilities to build a simple model on the well-known [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set). This notebook will focus mainly on the integration of hyperparameter tuning and a custom algorithm container, as well as hosting the tuned model and making inferences using the endpoint. For more examples, please see this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/r_bring_your_own/r_bring_your_own.ipynb).--- Setup_This notebook was created and tested on an ml.m4.xlarge notebook instance._Let's start by specifying:- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the notebook instance, training, and hosting.- The IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/using-identity-based-policies.html) for more details on creating these. Note, if a role is not associated with the current notebook instance, or more than one role is required for training and/or hosting, please replace `sagemaker.get_execution_role()` with the appropriate full IAM role arn string(s).
###Code
import sagemaker
bucket = sagemaker.Session().default_bucket()
prefix = 'sagemaker/DEMO-hpo-r-byo'
role = sagemaker.get_execution_role()
###Output
_____no_output_____
###Markdown
Now we'll import the libraries we'll need for the remainder of the notebook.
###Code
import os
import boto3
import sagemaker
import pandas as pd
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
###Output
_____no_output_____
###Markdown
PermissionsRunning this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because we'll be creating a new repository in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role associated with your notebook instance. There's no need to restart your notebook instance when you do this; the new permissions will be available immediately.--- CodeFor this example, we'll need 3 supporting code files. We'll provide just a brief overview of what each one does. See the full R bring your own notebook for more details.- **Fit**: `mars.R` creates functions to train and serve our model.- **Serve**: `plumber.R` uses the [plumber](https://www.rplumber.io/) package to create a lightweight HTTP server for processing requests in hosting. Note the specific syntax, and see the plumber help docs for additional detail on more specialized use cases.- **Dockerfile**: This specifies the configuration for our docker container. Smaller containers are preferred for Amazon SageMaker as they lead to faster spin-up times in training and endpoint creation, so this container is kept minimal. It simply starts with Ubuntu, installs R and the mda and plumber libraries, then adds `mars.R` and `plumber.R`, and finally sets `mars.R` to run as the entrypoint when launched. PublishNow, to publish this container to ECR, we'll run the commands below.These commands will take several minutes to run the first time.
###Code
algorithm_name = 'rmars'
%%sh
# The name of our algorithm
algorithm_name=rmars
#set -e # stop if anything fails
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
region=${region:-us-east-1}
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Get the login command from ECR and execute it directly (note: `aws ecr get-login`
# exists only in AWS CLI v1; on CLI v2 use `aws ecr get-login-password | docker login`)
$(aws ecr get-login --region ${region} --no-include-email)
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
###Output
_____no_output_____
###Markdown
--- DataFor this illustrative example, we'll simply use `iris`. This is a classic, but small, dataset used to test supervised learning algorithms. Typically, the goal is to predict one of three flower species based on various measurements of the flowers' attributes. Further detail can be found [here](https://en.wikipedia.org/wiki/Iris_flower_data_set).Let's split the data into train and test datasets (70% / 30%) and then copy the data to S3 so that SageMaker training can access it.
###Code
data = pd.read_csv('iris.csv')
# Train/test split, 70%-30%
train_data = data.sample(frac=0.7, random_state=42)
test_data = data.drop(train_data.index)
test_data.head()
# Write to csv
train_data.to_csv('iris_train.csv', index=False)
test_data.to_csv('iris_test.csv', index=False)
# write to S3
train_file = 'iris_train.csv'
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train', train_file)).upload_file(train_file)
###Output
_____no_output_____
###Markdown
_Note: Although we could do preliminary data transformations in the notebook, we'll avoid doing so, instead choosing to do those transformations inside the container. This is not typically the best practice for model efficiency, but provides some benefits in terms of flexibility._ --- TuneNow, let's set up the information needed to train a Multivariate Adaptive Regression Splines model on `iris` data. In this case, we'll predict `Sepal.Length` rather than the more typical classification of `Species` in order to show how factors might be included in a model and to limit the use case to regression.First, we'll get our region and account information so that we can point to the ECR container we just created.
###Code
region = boto3.Session().region_name
account = boto3.client('sts').get_caller_identity().get('Account')
###Output
_____no_output_____
###Markdown
Now we'll create an estimator using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk). This allows us to specify:- The training container image in ECR- The IAM role that controls permissions for accessing the S3 data and executing SageMaker functions- Number and type of training instances- S3 path for model artifacts to be output to- Any hyperparameters that we want to have the same value across all training jobs during tuning
###Code
estimator = sagemaker.estimator.Estimator(
image_uri='{}.dkr.ecr.{}.amazonaws.com/rmars:latest'.format(account, region),
role=role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
output_path='s3://{}/{}/output'.format(bucket, prefix),
sagemaker_session=sagemaker.Session(),
hyperparameters={'degree': 2}) # Setting constant hyperparameter
# target is by default "Sepal.Length". See mars.R where this is set.
###Output
_____no_output_____
###Markdown
Once we've defined our estimator we can specify the hyperparameters that we'd like to tune and their possible values. We have three different types of hyperparameters.- Categorical parameters need to take one value from a discrete set. We define this by passing the list of possible values to `CategoricalParameter(list)`- Continuous parameters can take any real number value between the minimum and maximum value, defined by `ContinuousParameter(min, max)`- Integer parameters can take any integer value between the minimum and maximum value, defined by `IntegerParameter(min, max)`*Note, if possible, it's almost always best to specify a value as the least restrictive type. For example, tuning `thresh` as a continuous value between 0.01 and 0.2 is likely to yield a better result than tuning as a categorical parameter with possible values of 0.01, 0.1, 0.15, or 0.2.*
###Code
# to set the degree as a varying HP to tune, use: 'degree': IntegerParameter(1, 3) and remove it from the Estimator
hyperparameter_ranges = {'thresh': ContinuousParameter(0.001, 0.01),
'prune': CategoricalParameter(['TRUE', 'FALSE'])}
###Output
_____no_output_____
###Markdown
Next, we'll specify the objective metric that we'd like to tune and its definition. This metric is output by a `print` statement in our `mars.R` file. It's critical that the format aligns with the regular expression (Regex) we then specify to extract that metric from the CloudWatch logs of our training job.
###Code
objective_metric_name = 'mse'
metric_definitions = [{'Name': 'mse',
'Regex': 'mse: ([0-9\\.]+)'}]
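# Illustrative sanity check that the Regex matches a log line shaped like the
# one mars.R prints (the sample line below is an assumption, not real output):
import re
print(re.findall('mse: ([0-9\\.]+)', 'mse: 0.1234'))  # ['0.1234']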
###Output
_____no_output_____
###Markdown
Now, we'll create a `HyperparameterTuner` object, which we pass:- The estimator we created above- Our hyperparameter ranges- Objective metric name and definition- Whether we should maximize or minimize our objective metric (defaults to 'Maximize')- Number of training jobs to run in total and how many training jobs should be run simultaneously. More parallel jobs will finish tuning sooner, but may sacrifice accuracy. We recommend you set the parallel jobs value to less than 10% of the total number of training jobs (we'll set it higher just for this example to keep it short).
###Code
tuner = HyperparameterTuner(estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
objective_type='Minimize',
max_jobs=9,
max_parallel_jobs=3)
###Output
_____no_output_____
###Markdown
And finally, we can start our hyperparameter tuning job by calling `.fit()` and passing in the S3 path to our training dataset.*Note, typically for hyperparameter tuning, we'd want to specify both a training and validation (or test) dataset and optimize the objective metric from the validation dataset. However, because `iris` is a very small dataset we'll skip the step of splitting into training and validation. In practice, doing this could lead to a model that overfits to our training data and does not generalize well.*
###Code
tuner.fit({'train': 's3://{}/{}/train'.format(bucket, prefix)})
###Output
_____no_output_____
###Markdown
Let's just run a quick check of the hyperparameter tuning job's status to make sure it started successfully and is `InProgress`.
###Code
import time
status = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['HyperParameterTuningJobStatus']
while status != "Completed":
status = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['HyperParameterTuningJobStatus']
completed = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['TrainingJobStatusCounters']['Completed']
prog = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['TrainingJobStatusCounters']['InProgress']
print(f'{status}, Completed Jobs: {completed}, In Progress Jobs: {prog}')
time.sleep(30)
###Output
_____no_output_____
###Markdown
Wait until the HPO job is complete, and then run the following cell:
###Code
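# Peek at the winning hyperparameter values first (exact values vary per run);
# 'TunedHyperParameters' is part of the BestTrainingJob summary returned below.
print(boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
    HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['BestTrainingJob']['TunedHyperParameters'])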
boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['BestTrainingJob']
###Output
_____no_output_____
###Markdown
--- HPO AnalysisNow that we've started our hyperparameter tuning job, it will run in the background and we can close this notebook. Once finished, we can use the [HPO Analysis notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) to determine which set of hyperparameters worked best.For more detail on Amazon SageMaker's Hyperparameter Tuning, please refer to the AWS documentation. --- HostHosting the model we just tuned takes three steps in Amazon SageMaker. First, we define the model we want to host, pointing the service to the model artifact our training job just wrote to S3.We will retrieve the best training job found by the HPO via the `describe_hyper_parameter_tuning_job` method.
###Code
best_training = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['BestTrainingJob']
# Get the best training job and S3 location for the model file
best_model_s3 = boto3.client('sagemaker').describe_training_job(
TrainingJobName=best_training['TrainingJobName'])['ModelArtifacts']['S3ModelArtifacts']
best_model_s3
import time
r_job = 'DEMO-r-byo-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
r_hosting_container = {
'Image': '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account, region, algorithm_name),
'ModelDataUrl': best_model_s3
}
create_model_response = boto3.client('sagemaker').create_model(
ModelName=r_job,
ExecutionRoleArn=role,
PrimaryContainer=r_hosting_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Next, let's create an endpoint configuration, passing in the model we just registered. In this case, we'll use a single ml.t2.medium instance.
###Code
r_endpoint_config = 'DEMO-r-byo-config-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print(r_endpoint_config)
create_endpoint_config_response = boto3.client('sagemaker').create_endpoint_config(
EndpointConfigName=r_endpoint_config,
ProductionVariants=[{
'InstanceType': 'ml.t2.medium',
'InitialInstanceCount': 1,
'ModelName': r_job,
'VariantName': 'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
###Output
_____no_output_____
###Markdown
Finally, we'll create the endpoints using our endpoint configuration from the last step.
###Code
%%time
r_endpoint = 'DEMO-r-endpoint-' + time.strftime("%Y%m%d%H%M", time.gmtime())
print(r_endpoint)
create_endpoint_response = boto3.client('sagemaker').create_endpoint(
EndpointName=r_endpoint,
EndpointConfigName=r_endpoint_config)
print(create_endpoint_response['EndpointArn'])
resp = boto3.client('sagemaker').describe_endpoint(EndpointName=r_endpoint)
status = resp['EndpointStatus']
print("Status: " + status)
try:
boto3.client('sagemaker').get_waiter('endpoint_in_service').wait(EndpointName=r_endpoint)
finally:
resp = boto3.client('sagemaker').describe_endpoint(EndpointName=r_endpoint)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Status: " + status)
if status != 'InService':
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
--- PredictTo confirm our endpoint is working properly, let's try to invoke it._Note: The payload we're passing in the request is a CSV string with a header record, followed by multiple newline-separated records. It also contains text columns, which the serving code converts to the set of indicator variables needed for our model predictions. Again, this is not a best practice for highly optimized code, however, it showcases the flexibility of bringing your own algorithm._
###Code
import pandas as pd
import json
iris_test = pd.read_csv('iris_test.csv')
runtime = boto3.Session().client('runtime.sagemaker')
%%time
# there is a limit of at most 500 samples per request when invoking the endpoint
payload = iris_test.drop(['Sepal.Length'], axis=1).to_csv(index=False)
response = runtime.invoke_endpoint(EndpointName=r_endpoint,
ContentType='text/csv',
Body=payload)
result = json.loads(response['Body'].read().decode())
display(result)
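# Optional follow-up (sketch): score the predictions against the held-out targets.
import numpy as np
predictions = np.fromstring(result[0], sep=',')
test_mse = np.mean((iris_test['Sepal.Length'].values - predictions) ** 2)
print('Test MSE: {:.4f}'.format(test_mse))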
###Output
_____no_output_____
###Markdown
We can see the result is a CSV of predictions for our target variable. Let's compare them to the actuals to see how our model did.
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(iris_test['Sepal.Length'], np.fromstring(result[0], sep=','), alpha=0.4, s=50)
plt.xlabel('Sepal Length(Actual)')
plt.ylabel('Sepal Length(Prediction)')
x = np.linspace(*plt.xlim())
plt.plot(x, x, linestyle='--', color='g', linewidth=1)
plt.xlim(4,8)
plt.ylim(4,8)
plt.show()
###Output
_____no_output_____
###Markdown
(Optional) Clean-upIf you're ready to be done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
###Code
boto3.client('sagemaker').delete_endpoint(EndpointName=r_endpoint)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning Your Own R Algorithm with Your Own Container in Amazon SageMaker_**Using Amazon SageMaker's Hyperparameter Tuning with a custom Docker container and R algorithm**_---**Read before running the notebook:**- This notebook has been updated to SageMaker v2.0- Use Python3 kernel for this notebook.- Dockerfile has been updated to use [Amazon ECR Public Gallery](https://docs.aws.amazon.com/AmazonECR/latest/public/public-gallery.html) Contents1. [Background](Background)1. [Setup](Setup) 1. [Permissions](Permissions)1. [Code](Code) 1. [Publish](Publish)1. [Data](Data)1. [Tune](Tune)1. [HPO Analysis](HPO-Analysis)1. [Host](Host)1. [Predict](Predict)1. [(Optional) Clean-up]((Optional)-Clean-up)1. [Wrap-up](Wrap-up)--- BackgroundR is a popular open source statistical programming language, with a lengthy history in Data Science and Machine Learning. The breadth of algorithms available as R packages is impressive and fuels a diverse community of users. In this example, we'll combine one of those algorithms ([Multivariate Adaptive Regression Splines](https://en.wikipedia.org/wiki/Multivariate_adaptive_regression_splines)) with SageMaker's hyperparameter tuning capabilities to build a simple model on the well-known [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set). This notebook will focus mainly on the integration of hyperparameter tuning and a custom algorithm container, as well as hosting the tuned model and making inferences using the endpoint. For more examples, please see this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/r_bring_your_own/r_bring_your_own.ipynb).--- Setup_This notebook was created and tested on an ml.m4.xlarge notebook instance._Let's start by specifying:- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the notebook instance, training, and hosting.- The IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/using-identity-based-policies.html) for more details on creating these. Note, if a role is not associated with the current notebook instance, or more than one role is required for training and/or hosting, please replace `sagemaker.get_execution_role()` with the appropriate full IAM role arn string(s).
###Code
import sagemaker
bucket = sagemaker.Session().default_bucket()
prefix = "sagemaker/DEMO-hpo-r-byo"
role = sagemaker.get_execution_role()
###Output
_____no_output_____
###Markdown
Now we'll import the libraries we'll need for the remainder of the notebook.
###Code
import os
import boto3
import sagemaker
import pandas as pd
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
###Output
_____no_output_____
###Markdown
PermissionsRunning this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because we'll be creating a new repository in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role associated with your notebook instance. There's no need to restart your notebook instance when you do this; the new permissions will be available immediately.--- CodeFor this example, we'll need 3 supporting code files. We'll provide just a brief overview of what each one does. See the full R bring your own notebook for more details.- **Fit**: `mars.R` creates functions to train and serve our model.- **Serve**: `plumber.R` uses the [plumber](https://www.rplumber.io/) package to create a lightweight HTTP server for processing requests in hosting. Note the specific syntax, and see the plumber help docs for additional detail on more specialized use cases.- **Dockerfile**: This specifies the configuration for our docker container. Smaller containers are preferred for Amazon SageMaker as they lead to faster spin-up times in training and endpoint creation, so this container is kept minimal. This Dockerfile starts with a base R image, installs the `plumber` and `mda` libraries and their dependencies, then adds `mars.R` and `plumber.R`, and finally sets `mars.R` to run as the entrypoint when launched. - **Update:** The updated Dockerfile leverages the public R-Base image from the [Amazon Public ECR Gallery](https://aws.amazon.com/about-aws/whats-new/2020/12/announcing-amazon-ecr-public-and-amazon-ecr-public-gallery/), which has been available since December 2020. Feel free to read more about this public gallery and browse for public images at https://gallery.ecr.aws/. PublishNow, to publish this container to ECR, we'll run the commands below.These commands will take several minutes to run the first time.
###Code
algorithm_name = "rmars"
%%sh
# The name of our algorithm
algorithm_name=rmars
#set -e # stop if anything fails
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
region=${region:-us-east-1}
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# Get the login command from ECR and execute it directly
aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin ${account}.dkr.ecr.${region}.amazonaws.com
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
###Output
_____no_output_____
###Markdown
--- DataFor this illustrative example, we'll simply use `iris`. This is a classic, but small, dataset used to test supervised learning algorithms. Typically, the goal is to predict one of three flower species based on various measurements of the flowers' attributes. Further details can be found [here](https://archive.ics.uci.edu/ml/datasets/iris).* **Source:** Dua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.Let's split the data into train and test datasets (70% / 30%) and then copy the data to S3 so that SageMaker training can access it.
###Code
column_names = ["Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species"]
data = pd.read_csv(
"s3://sagemaker-sample-files/datasets/tabular/iris/iris.data",
names=column_names,
)
# Train/test split, 70%-30%
train_data = data.sample(frac=0.7, random_state=42)
test_data = data.drop(train_data.index)
test_data.head()
# Write to csv
train_data.to_csv("iris_train.csv", index=False)
test_data.to_csv("iris_test.csv", index=False)
# write to S3
train_file = "iris_train.csv"
boto3.Session().resource("s3").Bucket(bucket).Object(
os.path.join(prefix, "train", train_file)
).upload_file(train_file)
###Output
_____no_output_____
###Markdown
_Note: Although we could do preliminary data transformations in the notebook, we'll avoid doing so, instead choosing to do those transformations inside the container. This is not typically the best practice for model efficiency, but provides some benefits in terms of flexibility._ --- TuneNow, let's set up the information needed to train a Multivariate Adaptive Regression Splines model on `iris` data. In this case, we'll predict `Sepal.Length` rather than the more typical classification of `Species` in order to show how factors might be included in a model and to limit the use case to regression.First, we'll get our region and account information so that we can point to the ECR container we just created.
###Code
region = boto3.Session().region_name
account = boto3.client("sts").get_caller_identity().get("Account")
###Output
_____no_output_____
###Markdown
Now we'll create an estimator using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk). This allows us to specify:- The training container image in ECR- The IAM role that controls permissions for accessing the S3 data and executing SageMaker functions- Number and type of training instances- S3 path for model artifacts to be output to- Any hyperparameters that we want to have the same value across all training jobs during tuning
###Code
estimator = sagemaker.estimator.Estimator(
image_uri="{}.dkr.ecr.{}.amazonaws.com/rmars:latest".format(account, region),
role=role,
instance_count=1,
instance_type="ml.m4.xlarge",
output_path="s3://{}/{}/output".format(bucket, prefix),
sagemaker_session=sagemaker.Session(),
hyperparameters={"degree": 2},
) # Setting constant hyperparameter
# target is by default "Sepal.Length". See mars.R where this is set.
###Output
_____no_output_____
###Markdown
Once we've defined our estimator we can specify the hyperparameters that we'd like to tune and their possible values. We have three different types of hyperparameters.- Categorical parameters need to take one value from a discrete set. We define this by passing the list of possible values to `CategoricalParameter(list)`- Continuous parameters can take any real number value between the minimum and maximum value, defined by `ContinuousParameter(min, max)`- Integer parameters can take any integer value between the minimum and maximum value, defined by `IntegerParameter(min, max)`*Note, if possible, it's almost always best to specify a value as the least restrictive type. For example, tuning `thresh` as a continuous value between 0.01 and 0.2 is likely to yield a better result than tuning as a categorical parameter with possible values of 0.01, 0.1, 0.15, or 0.2.*
###Code
# to set the degree as a varying HP to tune, use: 'degree': IntegerParameter(1, 3) and remove it from the Estimator
hyperparameter_ranges = {
"thresh": ContinuousParameter(0.001, 0.01),
"prune": CategoricalParameter(["TRUE", "FALSE"]),
}
###Output
_____no_output_____
###Markdown
Next, we'll specify the objective metric that we'd like to tune and its definition. This metric is output by a `print` statement in our `mars.R` file. It's critical that the format aligns with the regular expression (Regex) we then specify to extract that metric from the CloudWatch logs of our training job.
###Code
objective_metric_name = "mse"
metric_definitions = [{"Name": "mse", "Regex": "mse: ([0-9\\.]+)"}]
###Output
_____no_output_____
###Markdown
Now, we'll create a `HyperparameterTuner` object, which we pass:- The estimator we created above- Our hyperparameter ranges- Objective metric name and definition- Whether we should maximize or minimize our objective metric (defaults to 'Maximize')- Number of training jobs to run in total and how many training jobs should be run simultaneously. More parallel jobs will finish tuning sooner, but may sacrifice accuracy. We recommend you set the parallel jobs value to less than 10% of the total number of training jobs (we'll set it higher just for this example to keep it short).
###Code
tuner = HyperparameterTuner(
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
objective_type="Minimize",
max_jobs=9,
max_parallel_jobs=3,
)
###Output
_____no_output_____
###Markdown
And finally, we can start our hyperparameter tuning job by calling `.fit()` and passing in the S3 path to our training dataset.*Note, typically for hyperparameter tuning, we'd want to specify both a training and validation (or test) dataset and optimize the objective metric from the validation dataset. However, because `iris` is a very small dataset we'll skip the step of splitting into training and validation. In practice, doing this could lead to a model that overfits to our training data and does not generalize well.*
###Code
tuner.fit({"train": "s3://{}/{}/train".format(bucket, prefix)}, wait=False)
###Output
_____no_output_____
###Markdown
Let's just run a quick check of the hyperparameter tuning job's status to make sure it started successfully and is `InProgress`.
###Code
import time
status = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["HyperParameterTuningJobStatus"]
while status != "Completed":
status = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["HyperParameterTuningJobStatus"]
completed = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["TrainingJobStatusCounters"]["Completed"]
prog = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["TrainingJobStatusCounters"]["InProgress"]
print(f"{status}, Completed Jobs: {completed}, In Progress Jobs: {prog}")
time.sleep(30)
###Output
_____no_output_____
###Markdown
Wait until the HPO job is complete, and then run the following cell:
###Code
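# Peek at the winning hyperparameter values first (exact values vary per run);
# 'TunedHyperParameters' is part of the BestTrainingJob summary returned below.
print(
    boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
        HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
    )["BestTrainingJob"]["TunedHyperParameters"]
)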
boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["BestTrainingJob"]
###Output
_____no_output_____
###Markdown
--- HPO AnalysisNow that we've started our hyperparameter tuning job, it will run in the background and we can close this notebook. Once finished, we can use the [HPO Analysis notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) to determine which set of hyperparameters worked best.For more detail on Amazon SageMaker's Hyperparameter Tuning, please refer to the AWS documentation. --- HostHosting the model we just tuned takes three steps in Amazon SageMaker. First, we define the model we want to host, pointing the service to the model artifact our training job just wrote to S3.We will retrieve the best training job found by the HPO via the `describe_hyper_parameter_tuning_job` method.
###Code
best_training = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["BestTrainingJob"]
# Get the best training job and S3 location for the model file
best_model_s3 = boto3.client("sagemaker").describe_training_job(
TrainingJobName=best_training["TrainingJobName"]
)["ModelArtifacts"]["S3ModelArtifacts"]
best_model_s3
import time
r_job = "DEMO-r-byo-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
r_hosting_container = {
"Image": "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, algorithm_name),
"ModelDataUrl": best_model_s3,
}
create_model_response = boto3.client("sagemaker").create_model(
ModelName=r_job, ExecutionRoleArn=role, PrimaryContainer=r_hosting_container
)
print(create_model_response["ModelArn"])
###Output
_____no_output_____
###Markdown
Next, let's create an endpoint configuration, passing in the model we just registered. In this case, we'll use a single ml.t2.medium instance.
###Code
r_endpoint_config = "DEMO-r-byo-config-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print(r_endpoint_config)
create_endpoint_config_response = boto3.client("sagemaker").create_endpoint_config(
EndpointConfigName=r_endpoint_config,
ProductionVariants=[
{
"InstanceType": "ml.t2.medium",
"InitialInstanceCount": 1,
"ModelName": r_job,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint Config Arn: " + create_endpoint_config_response["EndpointConfigArn"])
###Output
_____no_output_____
###Markdown
Finally, we'll create the endpoints using our endpoint configuration from the last step.
###Code
%%time
r_endpoint = "DEMO-r-endpoint-" + time.strftime("%Y%m%d%H%M", time.gmtime())
print(r_endpoint)
create_endpoint_response = boto3.client("sagemaker").create_endpoint(
EndpointName=r_endpoint, EndpointConfigName=r_endpoint_config
)
print(create_endpoint_response["EndpointArn"])
resp = boto3.client("sagemaker").describe_endpoint(EndpointName=r_endpoint)
status = resp["EndpointStatus"]
print("Status: " + status)
try:
boto3.client("sagemaker").get_waiter("endpoint_in_service").wait(EndpointName=r_endpoint)
finally:
resp = boto3.client("sagemaker").describe_endpoint(EndpointName=r_endpoint)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Status: " + status)
if status != "InService":
raise Exception("Endpoint creation did not succeed")
###Output
_____no_output_____
###Markdown
--- PredictTo confirm our endpoint is working properly, let's try to invoke it._Note: The payload we're passing in the request is a CSV string with a header record, followed by multiple newline-separated records. It also contains text columns, which the serving code converts to the set of indicator variables needed for our model predictions. Again, this is not a best practice for highly optimized code, however, it showcases the flexibility of bringing your own algorithm._
###Code
import pandas as pd
import json
iris_test = pd.read_csv("iris_test.csv")
runtime = boto3.Session().client("runtime.sagemaker")
%%time
# there is a limit of at most 500 samples per request when invoking the endpoint
payload = iris_test.drop(["Sepal.Length"], axis=1).to_csv(index=False)
response = runtime.invoke_endpoint(EndpointName=r_endpoint, ContentType="text/csv", Body=payload)
result = json.loads(response["Body"].read().decode())
display(result)
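# Optional follow-up (sketch): score the predictions against the held-out targets.
import numpy as np
predictions = np.fromstring(result[0], sep=",")
test_mse = np.mean((iris_test["Sepal.Length"].values - predictions) ** 2)
print("Test MSE: {:.4f}".format(test_mse))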
###Output
_____no_output_____
###Markdown
We can see the result is a CSV of predictions for our target variable. Let's compare them to the actuals to see how our model did.
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(iris_test["Sepal.Length"], np.fromstring(result[0], sep=","), alpha=0.4, s=50)
plt.xlabel("Sepal Length(Actual)")
plt.ylabel("Sepal Length(Prediction)")
x = np.linspace(*plt.xlim())
plt.plot(x, x, linestyle="--", color="g", linewidth=1)
plt.xlim(4, 8)
plt.ylim(4, 8)
plt.show()
###Output
_____no_output_____
###Markdown
(Optional) Clean-upIf you're ready to be done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
###Code
boto3.client("sagemaker").delete_endpoint(EndpointName=r_endpoint)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning Your Own R Algorithm with Your Own Container in Amazon SageMaker_**Using Amazon SageMaker's Hyperparameter Tuning with a custom Docker container and R algorithm**_---**Read before running the notebook:**- This notebook has been updated to SageMaker v2.0- Use Python3 kernel for this notebook.- Dockerfile has been updated to use [Amazon ECR Public Gallery](https://docs.aws.amazon.com/AmazonECR/latest/public/public-gallery.html) Contents1. [Background](Background)1. [Setup](Setup) 1. [Permissions](Permissions)1. [Code](Code) 1. [Publish](Publish)1. [Data](Data)1. [Tune](Tune)1. [HPO Analysis](HPO-Analysis)1. [Host](Host)1. [Predict](Predict)1. [(Optional) Clean-up]((Optional)-Clean-up)1. [Wrap-up](Wrap-up)--- BackgroundR is a popular open source statistical programming language, with a lengthy history in Data Science and Machine Learning. The breadth of algorithms available as R packages is impressive and fuels a diverse community of users. In this example, we'll combine one of those algorithms ([Multivariate Adaptive Regression Splines](https://en.wikipedia.org/wiki/Multivariate_adaptive_regression_splines)) with SageMaker's hyperparameter tuning capabilities to build a simple model on the well-known [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set). This notebook will focus mainly on the integration of hyperparameter tuning and a custom algorithm container, as well as hosting the tuned model and making inferences using the endpoint. For more examples, please see this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/r_bring_your_own/r_bring_your_own.ipynb).--- Setup_This notebook was created and tested on an ml.m4.xlarge notebook instance._Let's start by specifying:- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the notebook instance, training, and hosting.- The IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/using-identity-based-policies.html) for more details on creating these. Note, if a role is not associated with the current notebook instance, or more than one role is required for training and/or hosting, please replace `sagemaker.get_execution_role()` with the appropriate full IAM role arn string(s).
###Code
import sagemaker
bucket = sagemaker.Session().default_bucket()
prefix = 'sagemaker/DEMO-hpo-r-byo'
role = sagemaker.get_execution_role()
###Output
_____no_output_____
###Markdown
Now we'll import the libraries we'll need for the remainder of the notebook.
###Code
import os
import boto3
import sagemaker
import pandas as pd
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
###Output
_____no_output_____
###Markdown
PermissionsRunning this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because we'll be creating a new repository in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role associated with your notebook instance. There's no need to restart your notebook instance when you do this; the new permissions will be available immediately.--- CodeFor this example, we'll need 3 supporting code files. We'll provide just a brief overview of what each one does. See the full R bring your own notebook for more details.- **Fit**: `mars.R` creates functions to train and serve our model.- **Serve**: `plumber.R` uses the [plumber](https://www.rplumber.io/) package to create a lightweight HTTP server for processing requests in hosting. Note the specific syntax, and see the plumber help docs for additional detail on more specialized use cases.- **Dockerfile**: This specifies the configuration for our docker container. Smaller containers are preferred for Amazon SageMaker as they lead to faster spin-up times in training and endpoint creation, so this container is kept minimal. This Dockerfile starts with a base R image, installs the `plumber` and `mda` libraries and their dependencies, then adds `mars.R` and `plumber.R`, and finally sets `mars.R` to run as the entrypoint when launched. - **Update:** The updated Dockerfile leverages the public R-Base image from the [Amazon Public ECR Gallery](https://aws.amazon.com/about-aws/whats-new/2020/12/announcing-amazon-ecr-public-and-amazon-ecr-public-gallery/), which has been available since December 2020. Feel free to read more about this public gallery and browse for public images at https://gallery.ecr.aws/. PublishNow, to publish this container to ECR, we'll run the commands below.These commands will take several minutes to run the first time.
###Code
algorithm_name = 'rmars'
%%sh
# The name of our algorithm
algorithm_name=rmars
#set -e # stop if anything fails
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
region=${region:-us-east-1}
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Get the login command from ECR and execute it directly (note: `aws ecr get-login`
# exists only in AWS CLI v1; on CLI v2 use `aws ecr get-login-password | docker login`)
$(aws ecr get-login --region ${region} --no-include-email)
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
###Output
_____no_output_____
###Markdown
--- DataFor this illustrative example, we'll simply use `iris`. This is a classic, but small, dataset used to test supervised learning algorithms. Typically, the goal is to predict one of three flower species based on various measurements of the flowers' attributes. Further detail can be found [here](https://en.wikipedia.org/wiki/Iris_flower_data_set).Let's split the data into train and test datasets (70% / 30%) and then copy the data to S3 so that SageMaker training can access it.
###Code
data = pd.read_csv('iris.csv')
# Train/test split, 70%-30%
train_data = data.sample(frac=0.7, random_state=42)
test_data = data.drop(train_data.index)
test_data.head()
# Write to csv
train_data.to_csv('iris_train.csv', index=False)
test_data.to_csv('iris_test.csv', index=False)
# write to S3
train_file = 'iris_train.csv'
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train', train_file)).upload_file(train_file)
###Output
_____no_output_____
###Markdown
_Note: Although we could do preliminary data transformations in the notebook, we'll avoid doing so, instead choosing to do those transformations inside the container. This is not typically the best practice for model efficiency, but provides some benefits in terms of flexibility._ --- TuneNow, let's set up the information needed to train a Multivariate Adaptive Regression Splines model on `iris` data. In this case, we'll predict `Sepal.Length` rather than the more typical classification of `Species` in order to show how factors might be included in a model and to limit the use case to regression.First, we'll get our region and account information so that we can point to the ECR container we just created.
###Code
region = boto3.Session().region_name
account = boto3.client('sts').get_caller_identity().get('Account')
###Output
_____no_output_____
###Markdown
Now we'll create an estimator using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk). This allows us to specify:- The training container image in ECR- The IAM role that controls permissions for accessing the S3 data and executing SageMaker functions- Number and type of training instances- S3 path for model artifacts to be output to- Any hyperparameters that we want to have the same value across all training jobs during tuning
###Code
estimator = sagemaker.estimator.Estimator(
image_uri='{}.dkr.ecr.{}.amazonaws.com/rmars:latest'.format(account, region),
role=role,
instance_count=1,
instance_type='ml.m4.xlarge',
output_path='s3://{}/{}/output'.format(bucket, prefix),
sagemaker_session=sagemaker.Session(),
hyperparameters={'degree': 2}) # Setting constant hyperparameter
# target is by default "Sepal.Length". See mars.R where this is set.
###Output
_____no_output_____
###Markdown
Once we've defined our estimator we can specify the hyperparameters that we'd like to tune and their possible values. We have three different types of hyperparameters.- Categorical parameters need to take one value from a discrete set. We define this by passing the list of possible values to `CategoricalParameter(list)`- Continuous parameters can take any real number value between the minimum and maximum value, defined by `ContinuousParameter(min, max)`- Integer parameters can take any integer value between the minimum and maximum value, defined by `IntegerParameter(min, max)`*Note, if possible, it's almost always best to specify a value as the least restrictive type. For example, tuning `thresh` as a continuous value between 0.01 and 0.2 is likely to yield a better result than tuning as a categorical parameter with possible values of 0.01, 0.1, 0.15, or 0.2.*
###Code
# to set the degree as a varying HP to tune, use: 'degree': IntegerParameter(1, 3) and remove it from the Estimator
hyperparameter_ranges = {'thresh': ContinuousParameter(0.001, 0.01),
'prune': CategoricalParameter(['TRUE', 'FALSE'])}
###Output
_____no_output_____
###Markdown
Next, we'll specify the objective metric that we'd like to tune and its definition. This metric is output by a `print` statement in our `mars.R` file. It's critical that the format aligns with the regular expression (Regex) we then specify to extract that metric from the CloudWatch logs of our training job.
###Code
objective_metric_name = 'mse'
metric_definitions = [{'Name': 'mse',
'Regex': 'mse: ([0-9\\.]+)'}]
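# Illustrative sanity check that the Regex matches a log line shaped like the
# one mars.R prints (the sample line below is an assumption, not real output):
import re
print(re.findall('mse: ([0-9\\.]+)', 'mse: 0.1234'))  # ['0.1234']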
###Output
_____no_output_____
###Markdown
Now, we'll create a `HyperparameterTuner` object, which we pass:- The estimator we created above- Our hyperparameter ranges- Objective metric name and definition- Whether we should maximize or minimize our objective metric (defaults to 'Maximize')- Number of training jobs to run in total and how many training jobs should be run simultaneously. More parallel jobs will finish tuning sooner, but may sacrifice accuracy. We recommend you set the parallel jobs value to less than 10% of the total number of training jobs (we'll set it higher just for this example to keep it short).
###Code
tuner = HyperparameterTuner(estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
objective_type='Minimize',
max_jobs=9,
max_parallel_jobs=3)
###Output
_____no_output_____
###Markdown
And finally, we can start our hyperparameter tuning job by calling `.fit()` and passing in the S3 path to our training dataset.*Note, typically for hyperparameter tuning, we'd want to specify both a training and validation (or test) dataset and optimize the objective metric from the validation dataset. However, because `iris` is a very small dataset we'll skip the step of splitting into training and validation. In practice, doing this could lead to a model that overfits to our training data and does not generalize well.*
###Code
tuner.fit({'train': 's3://{}/{}/train'.format(bucket, prefix)},
wait=False)
###Output
_____no_output_____
###Markdown
Let's just run a quick check of the hyperparameter tuning job's status to make sure it started successfully and is `InProgress`.
###Code
import time
status = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['HyperParameterTuningJobStatus']
while status != "Completed":
status = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['HyperParameterTuningJobStatus']
completed = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['TrainingJobStatusCounters']['Completed']
prog = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['TrainingJobStatusCounters']['InProgress']
print(f'{status}, Completed Jobs: {completed}, In Progress Jobs: {prog}')
time.sleep(30)
###Output
_____no_output_____
###Markdown
Wait until the HPO job is complete, and then run the following cell:
###Code
boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['BestTrainingJob']
###Output
_____no_output_____
###Markdown
--- HPO AnalysisNow that we've started our hyperparameter tuning job, it will run in the background and we can close this notebook. Once finished, we can use the [HPO Analysis notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) to determine which set of hyperparameters worked best.For more detail on Amazon SageMaker's Hyperparameter Tuning, please refer to the AWS documentation. --- HostHosting the model we just tuned takes three steps in Amazon SageMaker. First, we define the model we want to host, pointing the service to the model artifact our training job just wrote to S3.We will retrieve the best training job found by the HPO via the `describe_hyper_parameter_tuning_job` method.
###Code
best_training = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['BestTrainingJob']
# Get the best training job and S3 location for the model file
best_model_s3 = boto3.client('sagemaker').describe_training_job(
TrainingJobName=best_training['TrainingJobName'])['ModelArtifacts']['S3ModelArtifacts']
best_model_s3
import time
r_job = 'DEMO-r-byo-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
r_hosting_container = {
'Image': '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account, region, algorithm_name),
'ModelDataUrl': best_model_s3
}
create_model_response = boto3.client('sagemaker').create_model(
ModelName=r_job,
ExecutionRoleArn=role,
PrimaryContainer=r_hosting_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Next, let's create an endpoint configuration, passing in the model we just registered. In this case, we'll use a single ml.t2.medium instance.
###Code
r_endpoint_config = 'DEMO-r-byo-config-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print(r_endpoint_config)
create_endpoint_config_response = boto3.client('sagemaker').create_endpoint_config(
EndpointConfigName=r_endpoint_config,
ProductionVariants=[{
'InstanceType': 'ml.t2.medium',
'InitialInstanceCount': 1,
'ModelName': r_job,
'VariantName': 'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
###Output
_____no_output_____
###Markdown
Finally, we'll create the endpoints using our endpoint configuration from the last step.
###Code
%%time
r_endpoint = 'DEMO-r-endpoint-' + time.strftime("%Y%m%d%H%M", time.gmtime())
print(r_endpoint)
create_endpoint_response = boto3.client('sagemaker').create_endpoint(
EndpointName=r_endpoint,
EndpointConfigName=r_endpoint_config)
print(create_endpoint_response['EndpointArn'])
resp = boto3.client('sagemaker').describe_endpoint(EndpointName=r_endpoint)
status = resp['EndpointStatus']
print("Status: " + status)
try:
boto3.client('sagemaker').get_waiter('endpoint_in_service').wait(EndpointName=r_endpoint)
finally:
resp = boto3.client('sagemaker').describe_endpoint(EndpointName=r_endpoint)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Status: " + status)
if status != 'InService':
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
--- PredictTo confirm our endpoint is working properly, let's try to invoke it._Note: The payload we're passing in the request is a CSV string with a header record, followed by multiple newline-separated records. It also contains text columns, which the serving code converts to the set of indicator variables needed for our model predictions. Again, this is not a best practice for highly optimized code, however, it showcases the flexibility of bringing your own algorithm._
###Code
import pandas as pd
import json
iris_test = pd.read_csv('iris_test.csv')
runtime = boto3.Session().client('runtime.sagemaker')
%%time
# there is a limit of at most 500 samples per request when invoking the endpoint
payload = iris_test.drop(['Sepal.Length'], axis=1).to_csv(index=False)
response = runtime.invoke_endpoint(EndpointName=r_endpoint,
ContentType='text/csv',
Body=payload)
result = json.loads(response['Body'].read().decode())
display(result)
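# Optional follow-up (sketch): score the predictions against the held-out targets.
import numpy as np
predictions = np.fromstring(result[0], sep=',')
test_mse = np.mean((iris_test['Sepal.Length'].values - predictions) ** 2)
print('Test MSE: {:.4f}'.format(test_mse))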
###Output
_____no_output_____
###Markdown
We can see the result is a CSV of predictions for our target variable. Let's compare them to the actuals to see how our model did.
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(iris_test['Sepal.Length'], np.fromstring(result[0], sep=','), alpha=0.4, s=50)
plt.xlabel('Sepal Length(Actual)')
plt.ylabel('Sepal Length(Prediction)')
x = np.linspace(*plt.xlim())
plt.plot(x, x, linestyle='--', color='g', linewidth=1)
plt.xlim(4,8)
plt.ylim(4,8)
plt.show()
###Output
_____no_output_____
###Markdown
(Optional) Clean-upIf you're ready to be done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
###Code
boto3.client('sagemaker').delete_endpoint(EndpointName=r_endpoint)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning Your Own R Algorithm with Your Own Container in Amazon SageMaker_**Using Amazon SageMaker's Hyperparameter Tuning with a custom Docker container and R algorithm**_------ Contents1. [Background](Background)1. [Setup](Setup) 1. [Permissions](Permissions)1. [Code](Code) 1. [Publish](Publish)1. [Data](Data)1. [Tune](Tune)1. [HPO Analysis](HPO-Analysis)1. [Host](Host)1. [Predict](Predict)1. [(Optional) Clean-up]((Optional)-Clean-up)1. [Wrap-up](Wrap-up)--- BackgroundR is a popular open source statistical programming language, with a lengthy history in Data Science and Machine Learning. The breadth of algorithms available as R packages is impressive and fuels a diverse community of users. In this example, we'll combine one of those algorithms ([Multivariate Adaptive Regression Splines](https://en.wikipedia.org/wiki/Multivariate_adaptive_regression_splines)) with SageMaker's hyperparameter tuning capabilities to build a simple model on the well-known [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set). This notebook will focus mainly on the integration of hyperparameter tuning and a custom algorithm container, as well as hosting the tuned model and making inferences using the endpoint. For more examples, please see this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/r_bring_your_own/r_bring_your_own.ipynb).--- Setup_This notebook was created and tested on an ml.m4.xlarge notebook instance._Let's start by specifying:- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the notebook instance, training, and hosting.- The IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/using-identity-based-policies.html) for more details on creating these. Note, if a role is not associated with the current notebook instance, or more than one role is required for training and/or hosting, please replace `sagemaker.get_execution_role()` with the appropriate full IAM role arn string(s).
###Code
import sagemaker
bucket = sagemaker.Session().default_bucket()
prefix = 'sagemaker/DEMO-hpo-r-byo'
role = sagemaker.get_execution_role()
###Output
_____no_output_____
###Markdown
Now we'll import the libraries we'll need for the remainder of the notebook.
###Code
import os
import boto3
import sagemaker
import pandas as pd
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
###Output
_____no_output_____
###Markdown
PermissionsRunning this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because we'll be creating a new repository in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role associated with your notebook instance. There's no need to restart your notebook instance when you do this; the new permissions will be available immediately.--- CodeFor this example, we'll need 3 supporting code files. We'll provide just a brief overview of what each one does. See the full R bring your own notebook for more details.- **Fit**: `mars.R` creates functions to train and serve our model.- **Serve**: `plumber.R` uses the [plumber](https://www.rplumber.io/) package to create a lightweight HTTP server for processing requests in hosting. Note the specific syntax, and see the plumber help docs for additional detail on more specialized use cases.- **Dockerfile**: This specifies the configuration for our docker container. Smaller containers are preferred for Amazon SageMaker as they lead to faster spin up times in training and endpoint creation, so this container is kept minimal. It simply starts with Ubuntu, installs R, mda, and plumber libraries, then adds `mars.R` and `plumber.R`, and finally sets `mars.R` to run as the entrypoint when launched. PublishNow, to publish this container to ECR, we'll run the commands below. These commands will take several minutes to run the first time.
###Code
algorithm_name = 'rmars'
%%sh
# The name of our algorithm
algorithm_name=rmars
#set -e # stop if anything fails
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
region=${region:-us-east-1}
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Get the login command from ECR and execute it directly
$(aws ecr get-login --region ${region} --no-include-email)
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
###Output
_____no_output_____
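###Markdown
If you prefer to keep everything in Python, the repository check-and-create step from the shell cell above can also be done with boto3 (a sketch; the docker build, tag, and push still happen in the shell). The repository name `rmars` matches the one used above.
###Code
import boto3
ecr = boto3.client('ecr')
try:
    # succeeds if the repository already exists
    ecr.describe_repositories(repositoryNames=['rmars'])
except ecr.exceptions.RepositoryNotFoundException:
    # otherwise create it, mirroring the shell logic above
    ecr.create_repository(repositoryName='rmars')
###Output
_____no_output_____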
###Markdown
--- DataFor this illustrative example, we'll simply use `iris`. This is a classic, but small, dataset used to test supervised learning algorithms. Typically the goal is to predict one of three flower species based on various measurements of the flowers' attributes. Further detail can be found [here](https://en.wikipedia.org/wiki/Iris_flower_data_set).Let's split the data into train and test datasets (70% / 30%) and then copy the data to S3 so that SageMaker training can access it.
###Code
data = pd.read_csv('iris.csv')
# Train/test split, 70%-30%
train_data = data.sample(frac=0.7, random_state=42)
test_data = data.drop(train_data.index)
test_data.head()
# Write to csv
train_data.to_csv('iris_train.csv', index=False)
test_data.to_csv('iris_test.csv', index=False)
# write to S3
train_file = 'iris_train.csv'
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train', train_file)).upload_file(train_file)
###Output
_____no_output_____
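###Markdown
As a quick confirmation, we can print the S3 URI that the training channel will point at (a sketch using the `bucket`, `prefix`, and `train_file` variables defined above).
###Code
# S3 location the training channel will read from
print('s3://{}/{}/train/{}'.format(bucket, prefix, train_file))
###Output
_____no_output_____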
###Markdown
_Note: Although we could do preliminary data transformations in the notebook, we'll avoid doing so, instead choosing to do those transformations inside the container. This is not typically the best practice for model efficiency, but provides some benefits in terms of flexibility._ --- TuneNow, let's set up the information needed to train a Multivariate Adaptive Regression Splines model on `iris` data. In this case, we'll predict `Sepal.Length` rather than the more typical classification of `Species` in order to show how factors might be included in a model and to limit the use case to regression.First, we'll get our region and account information so that we can point to the ECR container we just created.
###Code
region = boto3.Session().region_name
account = boto3.client('sts').get_caller_identity().get('Account')
###Output
_____no_output_____
###Markdown
Now we'll create an estimator using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk). This allows us to specify:- The training container image in ECR- The IAM role that controls permissions for accessing the S3 data and executing SageMaker functions- Number and type of training instances- S3 path for model artifacts to be output to- Any hyperparameters that we want to have the same value across all training jobs during tuning
###Code
estimator = sagemaker.estimator.Estimator(
image_name='{}.dkr.ecr.{}.amazonaws.com/rmars:latest'.format(account, region),
role=role,
train_instance_count=1,
train_instance_type='ml.m4.xlarge',
output_path='s3://{}/{}/output'.format(bucket, prefix),
sagemaker_session=sagemaker.Session(),
hyperparameters={'degree': 2}) # Setting constant hyperparameter
# target is by default "Sepal.Length". See mars.R where this is set.
###Output
_____no_output_____
###Markdown
Once we've defined our estimator we can specify the hyperparameters that we'd like to tune and their possible values. We have three different types of hyperparameters.- Categorical parameters need to take one value from a discrete set. We define this by passing the list of possible values to `CategoricalParameter(list)`- Continuous parameters can take any real number value between the minimum and maximum value, defined by `ContinuousParameter(min, max)`- Integer parameters can take any integer value between the minimum and maximum value, defined by `IntegerParameter(min, max)`*Note, if possible, it's almost always best to specify a value as the least restrictive type. For example, tuning `thresh` as a continuous value between 0.01 and 0.2 is likely to yield a better result than tuning as a categorical parameter with possible values of 0.01, 0.1, 0.15, or 0.2.*
###Code
# to set the degree as a varying HP to tune, use: 'degree': IntegerParameter(1, 3) and remove it from the Estimator
hyperparameter_ranges = {'thresh': ContinuousParameter(0.001, 0.01),
'prune': CategoricalParameter(['TRUE', 'FALSE'])}
###Output
_____no_output_____
###Markdown
Next we'll specify the objective metric that we'd like to tune and its definition. This metric is output by a `print` statement in our `mars.R` file. It's critical that the format aligns with the regular expression (Regex) we then specify to extract that metric from the CloudWatch logs of our training job.
###Code
objective_metric_name = 'mse'
metric_definitions = [{'Name': 'mse',
'Regex': 'mse: ([0-9\\.]+)'}]
###Output
_____no_output_____
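###Markdown
To confirm the Regex really matches what `mars.R` prints, it can be tested locally against a sample log line (a sketch; the exact log line shown is an assumed example of the training output format).
###Code
import re
sample_log_line = 'mse: 0.0287'  # assumed example of the line mars.R prints
match = re.search('mse: ([0-9\\.]+)', sample_log_line)
print(match.group(1))
###Output
_____no_output_____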
###Markdown
Now, we'll create a `HyperparameterTuner` object, which we pass:- The estimator we created above- Our hyperparameter ranges- Objective metric name and definition- Whether we should maximize or minimize our objective metric (defaults to 'Maximize')- Number of training jobs to run in total and how many training jobs should be run simultaneously. More parallel jobs will finish tuning sooner, but may sacrifice accuracy. We recommend you set the parallel jobs value to less than 10% of the total number of training jobs (we'll set it higher just for this example to keep it short).
###Code
tuner = HyperparameterTuner(estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
objective_type='Minimize',
max_jobs=9,
max_parallel_jobs=3)
###Output
_____no_output_____
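###Markdown
As a worked check of the 10% guideline: with 9 total jobs it would suggest at most 1 parallel job, while we deliberately use 3 here to keep the demo short (a sketch).
###Code
# 10% guideline from the text, expressed in code
total_jobs = 9
recommended_parallel = max(1, total_jobs // 10)
print('guideline suggests:', recommended_parallel, '- we use 3 for speed')
###Output
_____no_output_____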
###Markdown
And finally, we can start our hyperparameter tuning job by calling `.fit()` and passing in the S3 paths to our train and test datasets.*Note, typically for hyperparameter tuning, we'd want to specify both a training and validation (or test) dataset and optimize the objective metric from the validation dataset. However, because `iris` is a very small dataset we'll skip the step of splitting into training and validation. In practice, doing this could lead to a model that overfits to our training data and does not generalize well.*
###Code
tuner.fit({'train': 's3://{}/{}/train'.format(bucket, prefix)})
###Output
_____no_output_____
###Markdown
Let's just run a quick check of the hyperparameter tuning job's status to make sure it started successfully and is `InProgress`.
###Code
import time
status = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['HyperParameterTuningJobStatus']
while status != "Completed":
status = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['HyperParameterTuningJobStatus']
completed = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['TrainingJobStatusCounters']['Completed']
prog = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['TrainingJobStatusCounters']['InProgress']
print(f'{status}, Completed Jobs: {completed}, In Progress Jobs: {prog}')
time.sleep(30)
###Output
_____no_output_____
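###Markdown
The loop above makes three `describe_hyper_parameter_tuning_job` calls per iteration. The same poll can be written with a single call per iteration (a sketch reusing the response keys shown above).
###Code
import time
sm = boto3.client('sagemaker')
while True:
    desc = sm.describe_hyper_parameter_tuning_job(
        HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)
    counters = desc['TrainingJobStatusCounters']
    print(f"{desc['HyperParameterTuningJobStatus']}, Completed Jobs: {counters['Completed']}, In Progress Jobs: {counters['InProgress']}")
    # stop on any terminal state, not just Completed
    if desc['HyperParameterTuningJobStatus'] in ('Completed', 'Failed', 'Stopped'):
        break
    time.sleep(30)
###Output
_____no_output_____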
###Markdown
Wait until the HPO job is complete, and then run the following cell:
###Code
boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['BestTrainingJob']
###Output
_____no_output_____
###Markdown
--- HPO AnalysisNow that we've started our hyperparameter tuning job, it will run in the background and we can close this notebook. Once finished, we can use the [HPO Analysis notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) to determine which set of hyperparameters worked best.For more detail on Amazon SageMaker's Hyperparameter Tuning, please refer to the AWS documentation. --- HostHosting the model we just tuned takes three steps in Amazon SageMaker. First, we define the model we want to host, pointing the service to the model artifact our training job just wrote to S3.We will use the results of the HPO for this purpose, retrieving the best training job via the `describe_hyper_parameter_tuning_job` method.
###Code
best_training = boto3.client('sagemaker').describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name)['BestTrainingJob']
# Get the best training job and S3 location for the model file
best_model_s3 = boto3.client('sagemaker').describe_training_job(
TrainingJobName=best_training['TrainingJobName'])['ModelArtifacts']['S3ModelArtifacts']
best_model_s3
import time
r_job = 'DEMO-r-byo-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
r_hosting_container = {
'Image': '{}.dkr.ecr.{}.amazonaws.com/{}:latest'.format(account, region, algorithm_name),
'ModelDataUrl': best_model_s3
}
create_model_response = boto3.client('sagemaker').create_model(
ModelName=r_job,
ExecutionRoleArn=role,
PrimaryContainer=r_hosting_container)
print(create_model_response['ModelArn'])
###Output
_____no_output_____
###Markdown
Next, let's create an endpoint configuration, passing in the model we just registered. In this case, we'll use a single ml.t2.medium instance.
###Code
r_endpoint_config = 'DEMO-r-byo-config-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print(r_endpoint_config)
create_endpoint_config_response = boto3.client('sagemaker').create_endpoint_config(
EndpointConfigName=r_endpoint_config,
ProductionVariants=[{
'InstanceType': 'ml.t2.medium',
'InitialInstanceCount': 1,
'ModelName': r_job,
'VariantName': 'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
###Output
_____no_output_____
###Markdown
Finally, we'll create the endpoint using our endpoint configuration from the last step.
###Code
%%time
r_endpoint = 'DEMO-r-endpoint-' + time.strftime("%Y%m%d%H%M", time.gmtime())
print(r_endpoint)
create_endpoint_response = boto3.client('sagemaker').create_endpoint(
EndpointName=r_endpoint,
EndpointConfigName=r_endpoint_config)
print(create_endpoint_response['EndpointArn'])
resp = boto3.client('sagemaker').describe_endpoint(EndpointName=r_endpoint)
status = resp['EndpointStatus']
print("Status: " + status)
try:
boto3.client('sagemaker').get_waiter('endpoint_in_service').wait(EndpointName=r_endpoint)
finally:
resp = boto3.client('sagemaker').describe_endpoint(EndpointName=r_endpoint)
status = resp['EndpointStatus']
print("Arn: " + resp['EndpointArn'])
print("Status: " + status)
if status != 'InService':
raise Exception('Endpoint creation did not succeed')
###Output
_____no_output_____
###Markdown
--- PredictTo confirm our endpoint is working properly, let's try to invoke it._Note: The payload we're passing in the request is a CSV string with a header record, followed by the data records, one per line. It also contains text columns, which the serving code converts to the set of indicator variables needed for our model predictions. Again, this is not a best practice for highly optimized code; however, it showcases the flexibility of bringing your own algorithm._
###Code
import pandas as pd
import json
iris_test = pd.read_csv('iris_test.csv')
runtime = boto3.Session().client('runtime.sagemaker')
%%time
# there is a limit of max 500 samples at a time for invoking endpoints
payload = iris_test.drop(['Sepal.Length'], axis=1).to_csv(index=False)
response = runtime.invoke_endpoint(EndpointName=r_endpoint,
ContentType='text/csv',
Body=payload)
result = json.loads(response['Body'].read().decode())
display(result)
###Output
_____no_output_____
###Markdown
We can see the result is a CSV of predictions for our target variable. Let's compare them to the actuals to see how our model did.
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(iris_test['Sepal.Length'], np.fromstring(result[0], sep=','), alpha=0.4, s=50)
plt.xlabel('Sepal Length(Actual)')
plt.ylabel('Sepal Length(Prediction)')
x = np.linspace(*plt.xlim())
plt.plot(x, x, linestyle='--', color='g', linewidth=1)
plt.xlim(4,8)
plt.ylim(4,8)
plt.show()
###Output
_____no_output_____
###Markdown
(Optional) Clean-upIf you're ready to be done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
###Code
boto3.client('sagemaker').delete_endpoint(EndpointName=r_endpoint)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning Your Own R Algorithm with Your Own Container in Amazon SageMaker_**Using Amazon SageMaker's Hyperparameter Tuning with a custom Docker container and R algorithm**_---**Read before running the notebook:**- This notebook has been updated to SageMaker v2.0- Use Python3 kernel for this notebook.- Dockerfile has been updated to use [Amazon ECR Public Gallery](https://docs.aws.amazon.com/AmazonECR/latest/public/public-gallery.html) Contents1. [Background](Background)1. [Setup](Setup) 1. [Permissions](Permissions)1. [Code](Code) 1. [Publish](Publish)1. [Data](Data)1. [Tune](Tune)1. [HPO Analysis](HPO-Analysis)1. [Host](Host)1. [Predict](Predict)1. [(Optional) Clean-up]((Optional)-Clean-up)1. [Wrap-up](Wrap-up)--- BackgroundR is a popular open source statistical programming language, with a lengthy history in Data Science and Machine Learning. The breadth of algorithms available as R packages is impressive and fuels a diverse community of users. In this example, we'll combine one of those algorithms ([Multivariate Adaptive Regression Splines](https://en.wikipedia.org/wiki/Multivariate_adaptive_regression_splines)) with SageMaker's hyperparameter tuning capabilities to build a simple model on the well-known [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set). This notebook will focus mainly on the integration of hyperparameter tuning and a custom algorithm container, as well as hosting the tuned model and making inferences using the endpoint. For more examples, please see this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/r_bring_your_own/r_bring_your_own.ipynb).--- Setup_This notebook was created and tested on an ml.m4.xlarge notebook instance._Let's start by specifying:- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the notebook instance, training, and hosting.- The IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/using-identity-based-policies.html) for more details on creating these. Note, if a role is not associated with the current notebook instance, or more than one role is required for training and/or hosting, please replace `sagemaker.get_execution_role()` with the appropriate full IAM role arn string(s).
###Code
import sagemaker
bucket = sagemaker.Session().default_bucket()
prefix = "sagemaker/DEMO-hpo-r-byo"
role = sagemaker.get_execution_role()
###Output
_____no_output_____
###Markdown
Now we'll import the libraries we'll need for the remainder of the notebook.
###Code
import os
import boto3
import sagemaker
import pandas as pd
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
###Output
_____no_output_____
###Markdown
PermissionsRunning this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because we'll be creating a new repository in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role associated with your notebook instance. There's no need to restart your notebook instance when you do this; the new permissions will be available immediately.--- CodeFor this example, we'll need 3 supporting code files. We'll provide just a brief overview of what each one does. See the full R bring your own notebook for more details.- **Fit**: `mars.R` creates functions to train and serve our model.- **Serve**: `plumber.R` uses the [plumber](https://www.rplumber.io/) package to create a lightweight HTTP server for processing requests in hosting. Note the specific syntax, and see the plumber help docs for additional detail on more specialized use cases.- **Dockerfile**: This specifies the configuration for our docker container. Smaller containers are preferred for Amazon SageMaker as they lead to faster spin up times in training and endpoint creation, so this container is kept minimal. This Dockerfile starts with a base R image, installs the `plumber` and `mda` libraries and their dependencies, then adds `mars.R` and `plumber.R`, and finally sets `mars.R` to run as the entrypoint when launched. - **Update:** The updated Dockerfile leverages the public R-Base image from the [Amazon Public ECR Gallery](https://aws.amazon.com/about-aws/whats-new/2020/12/announcing-amazon-ecr-public-and-amazon-ecr-public-gallery/), which has been available since December 2020. Feel free to read more about this public gallery and browse for public images at https://gallery.ecr.aws/. PublishNow, to publish this container to ECR, we'll run the commands below. These commands will take several minutes to run the first time.
###Code
algorithm_name = "rmars"
%%sh
# The name of our algorithm
algorithm_name=rmars
#set -e # stop if anything fails
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
region=${region:-us-east-1}
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Log in to ECR so docker can push the image (AWS CLI v2 style: pipe the password into docker login)
aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin ${account}.dkr.ecr.${region}.amazonaws.com
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
###Output
_____no_output_____
###Markdown
--- DataFor this illustrative example, we'll simply use `iris`. This is a classic, but small, dataset used to test supervised learning algorithms. Typically the goal is to predict one of three flower species based on various measurements of the flowers' attributes. Further detail can be found [here](https://en.wikipedia.org/wiki/Iris_flower_data_set).Let's split the data into train and test datasets (70% / 30%) and then copy the data to S3 so that SageMaker training can access it.
###Code
data = pd.read_csv("iris.csv")
# Train/test split, 70%-30%
train_data = data.sample(frac=0.7, random_state=42)
test_data = data.drop(train_data.index)
test_data.head()
# Write to csv
train_data.to_csv("iris_train.csv", index=False)
test_data.to_csv("iris_test.csv", index=False)
# write to S3
train_file = "iris_train.csv"
boto3.Session().resource("s3").Bucket(bucket).Object(
os.path.join(prefix, "train", train_file)
).upload_file(train_file)
###Output
_____no_output_____
###Markdown
_Note: Although we could do preliminary data transformations in the notebook, we'll avoid doing so, instead choosing to do those transformations inside the container. This is not typically the best practice for model efficiency, but provides some benefits in terms of flexibility._ --- TuneNow, let's set up the information needed to train a Multivariate Adaptive Regression Splines model on `iris` data. In this case, we'll predict `Sepal.Length` rather than the more typical classification of `Species` in order to show how factors might be included in a model and to limit the use case to regression.First, we'll get our region and account information so that we can point to the ECR container we just created.
###Code
region = boto3.Session().region_name
account = boto3.client("sts").get_caller_identity().get("Account")
###Output
_____no_output_____
###Markdown
Now we'll create an estimator using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk). This allows us to specify:- The training container image in ECR- The IAM role that controls permissions for accessing the S3 data and executing SageMaker functions- Number and type of training instances- S3 path for model artifacts to be output to- Any hyperparameters that we want to have the same value across all training jobs during tuning
###Code
estimator = sagemaker.estimator.Estimator(
image_uri="{}.dkr.ecr.{}.amazonaws.com/rmars:latest".format(account, region),
role=role,
instance_count=1,
instance_type="ml.m4.xlarge",
output_path="s3://{}/{}/output".format(bucket, prefix),
sagemaker_session=sagemaker.Session(),
hyperparameters={"degree": 2},
) # Setting constant hyperparameter
# target is by default "Sepal.Length". See mars.R where this is set.
###Output
_____no_output_____
###Markdown
Once we've defined our estimator we can specify the hyperparameters that we'd like to tune and their possible values. We have three different types of hyperparameters.- Categorical parameters need to take one value from a discrete set. We define this by passing the list of possible values to `CategoricalParameter(list)`- Continuous parameters can take any real number value between the minimum and maximum value, defined by `ContinuousParameter(min, max)`- Integer parameters can take any integer value between the minimum and maximum value, defined by `IntegerParameter(min, max)`*Note, if possible, it's almost always best to specify a value as the least restrictive type. For example, tuning `thresh` as a continuous value between 0.01 and 0.2 is likely to yield a better result than tuning as a categorical parameter with possible values of 0.01, 0.1, 0.15, or 0.2.*
###Code
# to set the degree as a varying HP to tune, use: 'degree': IntegerParameter(1, 3) and remove it from the Estimator
hyperparameter_ranges = {
"thresh": ContinuousParameter(0.001, 0.01),
"prune": CategoricalParameter(["TRUE", "FALSE"]),
}
###Output
_____no_output_____
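###Markdown
If you instead wanted to tune `degree` as well, as the comment above suggests, the ranges could look like the sketch below; remember to remove `degree` from the Estimator's fixed `hyperparameters`.
###Code
# alternative ranges including degree as a tunable integer hyperparameter (a sketch)
hyperparameter_ranges_with_degree = {
    "degree": IntegerParameter(1, 3),
    "thresh": ContinuousParameter(0.001, 0.01),
    "prune": CategoricalParameter(["TRUE", "FALSE"]),
}
###Output
_____no_output_____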
###Markdown
Next we'll specify the objective metric that we'd like to tune and its definition. This metric is output by a `print` statement in our `mars.R` file. It's critical that the format aligns with the regular expression (Regex) we then specify to extract that metric from the CloudWatch logs of our training job.
###Code
objective_metric_name = "mse"
metric_definitions = [{"Name": "mse", "Regex": "mse: ([0-9\\.]+)"}]
###Output
_____no_output_____
###Markdown
Now, we'll create a `HyperparameterTuner` object, which we pass:- The estimator we created above- Our hyperparameter ranges- Objective metric name and definition- Whether we should maximize or minimize our objective metric (defaults to 'Maximize')- Number of training jobs to run in total and how many training jobs should be run simultaneously. More parallel jobs will finish tuning sooner, but may sacrifice accuracy. We recommend you set the parallel jobs value to less than 10% of the total number of training jobs (we'll set it higher just for this example to keep it short).
###Code
tuner = HyperparameterTuner(
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
objective_type="Minimize",
max_jobs=9,
max_parallel_jobs=3,
)
###Output
_____no_output_____
###Markdown
And finally, we can start our hyperparameter tuning job by calling `.fit()` and passing in the S3 paths to our train and test datasets.*Note, typically for hyperparameter tuning, we'd want to specify both a training and validation (or test) dataset and optimize the objective metric from the validation dataset. However, because `iris` is a very small dataset we'll skip the step of splitting into training and validation. In practice, doing this could lead to a model that overfits to our training data and does not generalize well.*
###Code
tuner.fit({"train": "s3://{}/{}/train".format(bucket, prefix)}, wait=False)
###Output
_____no_output_____
###Markdown
Let's just run a quick check of the hyperparameter tuning job's status to make sure it started successfully and is `InProgress`.
###Code
import time
status = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["HyperParameterTuningJobStatus"]
while status != "Completed":
status = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["HyperParameterTuningJobStatus"]
completed = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["TrainingJobStatusCounters"]["Completed"]
prog = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["TrainingJobStatusCounters"]["InProgress"]
print(f"{status}, Completed Jobs: {completed}, In Progress Jobs: {prog}")
time.sleep(30)
###Output
_____no_output_____
###Markdown
Wait until the HPO job is complete, and then run the following cell:
###Code
boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["BestTrainingJob"]
###Output
_____no_output_____
###Markdown
--- HPO AnalysisNow that we've started our hyperparameter tuning job, it will run in the background and we can close this notebook. Once finished, we can use the [HPO Analysis notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) to determine which set of hyperparameters worked best.For more detail on Amazon SageMaker's Hyperparameter Tuning, please refer to the AWS documentation. --- HostHosting the model we just tuned takes three steps in Amazon SageMaker. First, we define the model we want to host, pointing the service to the model artifact our training job just wrote to S3.We will use the results of the HPO for this purpose, retrieving the best training job via the `describe_hyper_parameter_tuning_job` method.
###Code
best_training = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["BestTrainingJob"]
# Get the best training job and S3 location for the model file
best_model_s3 = boto3.client("sagemaker").describe_training_job(
TrainingJobName=best_training["TrainingJobName"]
)["ModelArtifacts"]["S3ModelArtifacts"]
best_model_s3
import time
r_job = "DEMO-r-byo-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
r_hosting_container = {
"Image": "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, algorithm_name),
"ModelDataUrl": best_model_s3,
}
create_model_response = boto3.client("sagemaker").create_model(
ModelName=r_job, ExecutionRoleArn=role, PrimaryContainer=r_hosting_container
)
print(create_model_response["ModelArn"])
###Output
_____no_output_____
###Markdown
Next, let's create an endpoint configuration, passing in the model we just registered. In this case, we'll use a single ml.t2.medium instance.
###Code
r_endpoint_config = "DEMO-r-byo-config-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print(r_endpoint_config)
create_endpoint_config_response = boto3.client("sagemaker").create_endpoint_config(
EndpointConfigName=r_endpoint_config,
ProductionVariants=[
{
"InstanceType": "ml.t2.medium",
"InitialInstanceCount": 1,
"ModelName": r_job,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint Config Arn: " + create_endpoint_config_response["EndpointConfigArn"])
###Output
_____no_output_____
###Markdown
Finally, we'll create the endpoint using our endpoint configuration from the last step.
###Code
%%time
r_endpoint = "DEMO-r-endpoint-" + time.strftime("%Y%m%d%H%M", time.gmtime())
print(r_endpoint)
create_endpoint_response = boto3.client("sagemaker").create_endpoint(
EndpointName=r_endpoint, EndpointConfigName=r_endpoint_config
)
print(create_endpoint_response["EndpointArn"])
resp = boto3.client("sagemaker").describe_endpoint(EndpointName=r_endpoint)
status = resp["EndpointStatus"]
print("Status: " + status)
try:
boto3.client("sagemaker").get_waiter("endpoint_in_service").wait(EndpointName=r_endpoint)
finally:
resp = boto3.client("sagemaker").describe_endpoint(EndpointName=r_endpoint)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Status: " + status)
if status != "InService":
raise Exception("Endpoint creation did not succeed")
###Output
_____no_output_____
###Markdown
--- PredictTo confirm our endpoint is working properly, let's try to invoke it._Note: The payload we're passing in the request is a CSV string with a header record, followed by the data records, one per line. It also contains text columns, which the serving code converts to the set of indicator variables needed for our model predictions. Again, this is not a best practice for highly optimized code; however, it showcases the flexibility of bringing your own algorithm._
###Code
import pandas as pd
import json
iris_test = pd.read_csv("iris_test.csv")
runtime = boto3.Session().client("runtime.sagemaker")
%%time
# there is a limit of max 500 samples at a time for invoking endpoints
payload = iris_test.drop(["Sepal.Length"], axis=1).to_csv(index=False)
response = runtime.invoke_endpoint(EndpointName=r_endpoint, ContentType="text/csv", Body=payload)
result = json.loads(response["Body"].read().decode())
display(result)
###Output
_____no_output_____
###Markdown
We can see the result is a CSV of predictions for our target variable. Let's compare them to the actuals to see how our model did.
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(iris_test["Sepal.Length"], np.fromstring(result[0], sep=","), alpha=0.4, s=50)
plt.xlabel("Sepal Length(Actual)")
plt.ylabel("Sepal Length(Prediction)")
x = np.linspace(*plt.xlim())
plt.plot(x, x, linestyle="--", color="g", linewidth=1)
plt.xlim(4, 8)
plt.ylim(4, 8)
plt.show()
###Output
_____no_output_____
###Markdown
(Optional) Clean-upIf you're ready to be done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
###Code
boto3.client("sagemaker").delete_endpoint(EndpointName=r_endpoint)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning Your Own R Algorithm with Your Own Container in Amazon SageMaker_**Using Amazon SageMaker's Hyperparameter Tuning with a custom Docker container and R algorithm**_---**Read before running the notebook:**- This notebook has been updated to SageMaker v2.0- Use Python3 kernel for this notebook.- Dockerfile has been updated to use [Amazon ECR Public Gallery](https://docs.aws.amazon.com/AmazonECR/latest/public/public-gallery.html) Contents1. [Background](Background)1. [Setup](Setup) 1. [Permissions](Permissions)1. [Code](Code) 1. [Publish](Publish)1. [Data](Data)1. [Tune](Tune)1. [HPO Analysis](HPO-Analysis)1. [Host](Host)1. [Predict](Predict)1. [(Optional) Clean-up]((Optional)-Clean-up)1. [Wrap-up](Wrap-up)--- BackgroundR is a popular open source statistical programming language, with a lengthy history in Data Science and Machine Learning. The breadth of algorithms available as R packages is impressive and fuels a diverse community of users. In this example, we'll combine one of those algorithms ([Multivariate Adaptive Regression Splines](https://en.wikipedia.org/wiki/Multivariate_adaptive_regression_splines)) with SageMaker's hyperparameter tuning capabilities to build a simple model on the well-known [Iris dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set). This notebook will focus mainly on the integration of hyperparameter tuning and a custom algorithm container, as well as hosting the tuned model and making inferences using the endpoint. For more examples, please see this [notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/advanced_functionality/r_bring_your_own/r_bring_your_own.ipynb).--- Setup_This notebook was created and tested on an ml.m4.xlarge notebook instance._Let's start by specifying:- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the notebook instance, training, and hosting.- The IAM role arn used to give training and hosting access to your data. See the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/using-identity-based-policies.html) for more details on creating these. Note, if a role is not associated with the current notebook instance, or more than one role is required for training and/or hosting, please replace `sagemaker.get_execution_role()` with the appropriate full IAM role arn string(s).
###Code
import sagemaker
bucket = sagemaker.Session().default_bucket()
prefix = "sagemaker/DEMO-hpo-r-byo"
role = sagemaker.get_execution_role()
###Output
_____no_output_____
###Markdown
Now we'll import the libraries we'll need for the remainder of the notebook.
###Code
import os
import boto3
import sagemaker
import pandas as pd
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
###Output
_____no_output_____
###Markdown
PermissionsRunning this notebook requires permissions in addition to the normal `SageMakerFullAccess` permissions. This is because we'll be creating a new repository in Amazon ECR. The easiest way to add these permissions is simply to add the managed policy `AmazonEC2ContainerRegistryFullAccess` to the role associated with your notebook instance. There's no need to restart your notebook instance when you do this; the new permissions will be available immediately.--- CodeFor this example, we'll need 3 supporting code files. We'll provide just a brief overview of what each one does. See the full R bring your own notebook for more details.- **Fit**: `mars.R` creates functions to train and serve our model.- **Serve**: `plumber.R` uses the [plumber](https://www.rplumber.io/) package to create a lightweight HTTP server for processing requests in hosting. Note the specific syntax, and see the plumber help docs for additional detail on more specialized use cases.- **Dockerfile**: This specifies the configuration for our docker container. Smaller containers are preferred for Amazon SageMaker as they lead to faster spin up times in training and endpoint creation, so this container is kept minimal. This Dockerfile starts with a base R image, installs the `plumber` and `mda` libraries and their dependencies, then adds `mars.R` and `plumber.R`, and finally sets `mars.R` to run as the entrypoint when launched. - **Update:** The updated Dockerfile leverages the public R-Base image from the [Amazon Public ECR Gallery](https://aws.amazon.com/about-aws/whats-new/2020/12/announcing-amazon-ecr-public-and-amazon-ecr-public-gallery/), which has been available since December 2020. Feel free to read more about this public gallery and browse for public images at https://gallery.ecr.aws/. PublishNow, to publish this container to ECR, we'll run the commands below. These commands will take several minutes to run the first time.
###Code
algorithm_name = "rmars"
%%sh
# The name of our algorithm
algorithm_name=rmars
#set -e # stop if anything fails
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
region=${region:-us-east-1}
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Log in to ECR so docker can push the image (AWS CLI v2 style: pipe the password into docker login)
aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin ${account}.dkr.ecr.${region}.amazonaws.com
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
###Output
_____no_output_____
###Markdown
--- DataFor this illustrative example, we'll simply use `iris`. This is a classic, but small, dataset used to test supervised learning algorithms. Typically the goal is to predict one of three flower species based on various measurements of the flowers' attributes. Further detail can be found [here](https://en.wikipedia.org/wiki/Iris_flower_data_set).Let's split the data into train and test datasets (70% / 30%) and then copy the data to S3 so that SageMaker training can access it.
###Code
data = pd.read_csv("iris.csv")
# Train/test split, 70%-30%
train_data = data.sample(frac=0.7, random_state=42)
test_data = data.drop(train_data.index)
test_data.head()
# Write to csv
train_data.to_csv("iris_train.csv", index=False)
test_data.to_csv("iris_test.csv", index=False)
# write to S3
train_file = "iris_train.csv"
boto3.Session().resource("s3").Bucket(bucket).Object(
os.path.join(prefix, "train", train_file)
).upload_file(train_file)
###Output
_____no_output_____
###Markdown
_Note: Although we could do preliminary data transformations in the notebook, we'll avoid doing so, instead choosing to do those transformations inside the container. This is not typically the best practice for model efficiency, but provides some benefits in terms of flexibility._ --- TuneNow, let's set up the information needed to train a Multivariate Adaptive Regression Splines model on `iris` data. In this case, we'll predict `Sepal.Length` rather than the more typical classification of `Species` in order to show how factors might be included in a model and to limit the use case to regression.First, we'll get our region and account information so that we can point to the ECR container we just created.
###Code
region = boto3.Session().region_name
account = boto3.client("sts").get_caller_identity().get("Account")
###Output
_____no_output_____
###Markdown
Now we'll create an estimator using the [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk). This allows us to specify:- The training container image in ECR- The IAM role that controls permissions for accessing the S3 data and executing SageMaker functions- Number and type of training instances- S3 path for model artifacts to be output to- Any hyperparameters that we want to have the same value across all training jobs during tuning
###Code
estimator = sagemaker.estimator.Estimator(
image_uri="{}.dkr.ecr.{}.amazonaws.com/rmars:latest".format(account, region),
role=role,
instance_count=1,
instance_type="ml.m4.xlarge",
output_path="s3://{}/{}/output".format(bucket, prefix),
sagemaker_session=sagemaker.Session(),
hyperparameters={"degree": 2},
) # Setting constant hyperparameter
# target is by default "Sepal.Length". See mars.R where this is set.
###Output
_____no_output_____
###Markdown
Once we've defined our estimator we can specify the hyperparameters that we'd like to tune and their possible values. We have three different types of hyperparameters.- Categorical parameters need to take one value from a discrete set. We define this by passing the list of possible values to `CategoricalParameter(list)`- Continuous parameters can take any real number value between the minimum and maximum value, defined by `ContinuousParameter(min, max)`- Integer parameters can take any integer value between the minimum and maximum value, defined by `IntegerParameter(min, max)`*Note, if possible, it's almost always best to specify a value as the least restrictive type. For example, tuning `thresh` as a continuous value between 0.01 and 0.2 is likely to yield a better result than tuning as a categorical parameter with possible values of 0.01, 0.1, 0.15, or 0.2.*
###Code
# to set the degree as a varying HP to tune, use: 'degree': IntegerParameter(1, 3) and remove it from the Estimator
hyperparameter_ranges = {
"thresh": ContinuousParameter(0.001, 0.01),
"prune": CategoricalParameter(["TRUE", "FALSE"]),
}
###Output
_____no_output_____
###Markdown
Next we'll specify the objective metric that we'd like to tune and its definition. This metric is output by a `print` statement in our `mars.R` file. It's critical that the format aligns with the regular expression (Regex) we then specify to extract that metric from the CloudWatch logs of our training job.
###Code
objective_metric_name = "mse"
metric_definitions = [{"Name": "mse", "Regex": "mse: ([0-9\\.]+)"}]
###Output
_____no_output_____
###Markdown
Now, we'll create a `HyperparameterTuner` object, which we pass:- The estimator we created above- Our hyperparameter ranges- Objective metric name and definition- Whether we should maximize or minimize our objective metric (defaults to 'Maximize')- Number of training jobs to run in total and how many training jobs should be run simultaneously. More parallel jobs will finish tuning sooner, but may sacrifice accuracy. We recommend you set the parallel jobs value to less than 10% of the total number of training jobs (we'll set it higher just for this example to keep it short).
###Code
tuner = HyperparameterTuner(
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
objective_type="Minimize",
max_jobs=9,
max_parallel_jobs=3,
)
###Output
_____no_output_____
###Markdown
And finally, we can start our hyperparameter tuning job by calling `.fit()` and passing in the S3 paths to our train and test datasets.*Note, typically for hyperparameter tuning, we'd want to specify both a training and validation (or test) dataset and optimize the objective metric from the validation dataset. However, because `iris` is a very small dataset we'll skip the step of splitting into training and validation. In practice, doing this could lead to a model that overfits to our training data and does not generalize well.*
###Code
tuner.fit({"train": "s3://{}/{}/train".format(bucket, prefix)}, wait=False)
###Output
_____no_output_____
###Markdown
Let's just run a quick check of the hyperparameter tuning job's status to make sure it started successfully and is `InProgress`.
###Code
import time
status = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["HyperParameterTuningJobStatus"]
while status != "Completed":
status = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["HyperParameterTuningJobStatus"]
completed = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["TrainingJobStatusCounters"]["Completed"]
prog = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["TrainingJobStatusCounters"]["InProgress"]
print(f"{status}, Completed Jobs: {completed}, In Progress Jobs: {prog}")
time.sleep(30)
###Output
_____no_output_____
###Markdown
Wait until the HPO job is complete, and then run the following cell:
###Code
boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["BestTrainingJob"]
###Output
_____no_output_____
###Markdown
--- HPO AnalysisNow that we've started our hyperparameter tuning job, it will run in the background and we can close this notebook. Once finished, we can use the [HPO Analysis notebook](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) to determine which set of hyperparameters worked best.For more detail on Amazon SageMaker's Hyperparameter Tuning, please refer to the AWS documentation. --- HostHosting the model we just tuned takes three steps in Amazon SageMaker. First, we define the model we want to host, pointing the service to the model artifact our training job just wrote to S3.We will use the results of the HPO for this purpose, retrieving the best training job via the `describe_hyper_parameter_tuning_job` method.
###Code
best_training = boto3.client("sagemaker").describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=tuner.latest_tuning_job.job_name
)["BestTrainingJob"]
# Get the best training job and S3 location for the model file
best_model_s3 = boto3.client("sagemaker").describe_training_job(
TrainingJobName=best_training["TrainingJobName"]
)["ModelArtifacts"]["S3ModelArtifacts"]
best_model_s3
import time
r_job = "DEMO-r-byo-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
r_hosting_container = {
"Image": "{}.dkr.ecr.{}.amazonaws.com/{}:latest".format(account, region, algorithm_name),
"ModelDataUrl": best_model_s3,
}
create_model_response = boto3.client("sagemaker").create_model(
ModelName=r_job, ExecutionRoleArn=role, PrimaryContainer=r_hosting_container
)
print(create_model_response["ModelArn"])
###Output
_____no_output_____
###Markdown
Next, let's create an endpoint configuration, passing in the model we just registered. In this case, we'll use a single ml.t2.medium instance.
###Code
r_endpoint_config = "DEMO-r-byo-config-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
print(r_endpoint_config)
create_endpoint_config_response = boto3.client("sagemaker").create_endpoint_config(
EndpointConfigName=r_endpoint_config,
ProductionVariants=[
{
"InstanceType": "ml.t2.medium",
"InitialInstanceCount": 1,
"ModelName": r_job,
"VariantName": "AllTraffic",
}
],
)
print("Endpoint Config Arn: " + create_endpoint_config_response["EndpointConfigArn"])
###Output
_____no_output_____
###Markdown
Finally, we'll create the endpoint using our endpoint configuration from the last step.
###Code
%%time
r_endpoint = "DEMO-r-endpoint-" + time.strftime("%Y%m%d%H%M", time.gmtime())
print(r_endpoint)
create_endpoint_response = boto3.client("sagemaker").create_endpoint(
EndpointName=r_endpoint, EndpointConfigName=r_endpoint_config
)
print(create_endpoint_response["EndpointArn"])
resp = boto3.client("sagemaker").describe_endpoint(EndpointName=r_endpoint)
status = resp["EndpointStatus"]
print("Status: " + status)
try:
boto3.client("sagemaker").get_waiter("endpoint_in_service").wait(EndpointName=r_endpoint)
finally:
resp = boto3.client("sagemaker").describe_endpoint(EndpointName=r_endpoint)
status = resp["EndpointStatus"]
print("Arn: " + resp["EndpointArn"])
print("Status: " + status)
if status != "InService":
raise Exception("Endpoint creation did not succeed")
###Output
_____no_output_____
###Markdown
--- PredictTo confirm our endpoint is working properly, let's try to invoke it._Note: The payload we're passing in the request is a CSV string with a header record, followed by the data records, one per line. It also contains text columns, which the serving code converts to the set of indicator variables needed for our model predictions. Again, this is not a best practice for highly optimized code; however, it showcases the flexibility of bringing your own algorithm._
###Code
import pandas as pd
import json
iris_test = pd.read_csv("iris_test.csv")
runtime = boto3.Session().client("runtime.sagemaker")
%%time
# there is a limit of max 500 samples at a time for invoking endpoints
payload = iris_test.drop(["Sepal.Length"], axis=1).to_csv(index=False)
response = runtime.invoke_endpoint(EndpointName=r_endpoint, ContentType="text/csv", Body=payload)
result = json.loads(response["Body"].read().decode())
display(result)
###Output
_____no_output_____
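###Markdown
To see exactly what the serving container receives, it can help to peek at the first few lines of the payload (a sketch; `payload` as built above, so the header record comes first).
###Code
# first three lines of the CSV payload: header record plus two data records
print('\n'.join(payload.splitlines()[:3]))
###Output
_____no_output_____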
###Markdown
We can see the result is a CSV of predictions for our target variable. Let's compare them to the actuals to see how our model did.
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.scatter(iris_test["Sepal.Length"], np.fromstring(result[0], sep=","), alpha=0.4, s=50)
plt.xlabel("Sepal Length(Actual)")
plt.ylabel("Sepal Length(Prediction)")
x = np.linspace(*plt.xlim())
plt.plot(x, x, linestyle="--", color="g", linewidth=1)
plt.xlim(4, 8)
plt.ylim(4, 8)
plt.show()
###Output
_____no_output_____
###Markdown
(Optional) Clean-upIf you're ready to be done with this notebook, please run the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
###Code
boto3.client("sagemaker").delete_endpoint(EndpointName=r_endpoint)
###Output
_____no_output_____ |
src/plotting/rolling_window/TFBScoverage_rw_plots_single.ipynb | ###Markdown
Now do constitutive and variable promoters from Czechowski et al 2005
###Code
def add_genetype(coverage,gene_categories):
"""function to add gene type to the df, and remove random genes"""
select_genes = pd.read_table(gene_categories, sep='\t', header=None)
cols = ['AGI', 'gene_type']
select_genes.columns = cols
merged = pd.merge(select_genes, coverage, on='AGI', how='left')
# no_random = merged_renamed[merged_renamed.gene_type != 'random']
# no_random.reset_index(drop=True, inplace=True)
return merged
coverage_prom_types = add_genetype(coverage_df, Czechowski_gene_categories)
coverage_prom_types
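# quick sanity check (a sketch): counts of promoters per gene_type after the merge
print(coverage_prom_types['gene_type'].value_counts(dropna=False))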
rolling_rootshootintersect = windows_coords('Czechowski_genetypenocontrol','percentage_bases_covered','% bp covered',coverage_prom_types,promoter_bed,promoter_no_5UTR,
50,EPD_TSS_bed, includeEPDTSS=False,estimator='median', genetype='constitutive', genetype2='variable')
#figure legend:
# 1) Sliding windows of 100bp with 50bp offset were taken across each promoter.
# 2) Window 1 is the 100bp immediately upstream of the ATG. Window 2 is offset 50bp further upstream of this and so on.
# 3) The coordinate of the middle of each window was used for each data point on the plot x axis
# 4) % bp covered by TFBSs in each window were calculated. The mean % bp covered for each window position (ie. mean of all window 1s, mean of all window 2s etc) across all promoters was calculated and used for the y-axis
# 5) The x-axis range was set to the longest promoter in the dataset
# 6) 95% confidence intervals
rolling_rootchrom = windows_coords('Czechowski_genetypenocontrol','percentage_bases_covered','% bp covered',coverage_prom_types,promoter_bed,promoter_no_5UTR,
50,EPD_TSS_bed,chromatin_tissue_variable='percentage_bases_covered_root_chrom',
chromatin_tissue_variable_name='% open chromatin root',estimator='median', genetype='constitutive', genetype2='variable')
rolling_shootchrom = windows_coords('Czechowski_genetypenocontrol','percentage_bases_covered','% bp covered',coverage_prom_types,promoter_bed,promoter_no_5UTR,
50,EPD_TSS_bed,chromatin_tissue_variable='percentage_bases_covered_shoot_chrom',
chromatin_tissue_variable_name='% open chromatin shoot',estimator='median', genetype='constitutive', genetype2='variable')
rolling = windows_coords('Czechowski_genetypenocontrol','percentage_bases_covered','% bp covered',coverage_prom_types,promoter_bed,promoter_no_5UTR, 50,EPD_TSS_bed,x_range=500,estimator='mean', genetype='constitutive', genetype2='variable')
rolling_mean = windows_coords('Czechowski_genetypenocontrol','percentage_bases_covered','% bp covered',coverage_prom_types,promoter_bed,promoter_no_5UTR, 50,EPD_TSS_bed,x_range=1500,estimator='mean', genetype='constitutive', genetype2='variable')
rolling_incl_control = windows_coords('Czechowski_genetype','percentage_bases_covered','% bp covered',coverage_prom_types,promoter_bed,promoter_no_5UTR, 50,EPD_TSS_bed,x_range=1500,estimator='mean', genetype='constitutive', genetype2='variable', genetype3='control')
rolling_incl_control = windows_coords('Czechowski_genetype','percentage_bases_covered','% bp covered',coverage_prom_types,promoter_bed,promoter_no_5UTR, 50,EPD_TSS_bed,estimator='median',x_range=1500, genetype='constitutive', genetype2='variable', genetype3='control')
###Output
sample size in each category = 100
|
notebooks/components/ConnectedComponents.ipynb | ###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-)graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure.Notebook Credits* Original Authors: Kumar Aatish* Created: 08/13/2019* Last Edit: 10/28/2019RAPIDS Versions: 0.10.0 Test Hardware* GV100 32G, CUDA 10.0 IntroductionTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)**To compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)**Both of these calls have an identical API: Input* __G__: cugraph.Graph objectReturns * __df__: a cudf.DataFrame object with two columns: * df['labels'][i]: Gives the label id of the i'th vertex * df['vertices'][i]: Gives the vertex id of the i'th vertex cuGraph Notice The current version of cuGraph has some limitations:* Vertex IDs need to be 32-bit integers.* Vertex IDs are expected to be contiguous integers starting from 0.cuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon. Test DataWe will be using the Netscience dataset: *M. E. J. Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.(Figure placeholder: Netscience adjacency matrix | NetScience strongly connected components. Matrix plots by Yifan Hu, AT&T Labs Visualization Group.)
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
from collections import OrderedDict
###Output
_____no_output_____
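###Markdown
The difference between the two notions is easy to see on a toy example first. Below is a minimal CPU-side sketch using NetworkX (an assumed extra dependency, used purely for illustration; it is not part of this notebook's stack) on a four-vertex directed graph.
###Code
# Illustration only (assumes NetworkX is installed): WCC vs SCC on a toy DiGraph.
import networkx as nx

toy = nx.DiGraph()
toy.add_edges_from([(0, 1), (1, 2), (2, 0), (2, 3)])  # a 3-cycle plus one one-way edge
# Weak connectivity ignores edge direction: one component holding all four vertices.
print(list(nx.weakly_connected_components(toy)))
# Strong connectivity requires paths in both directions: {0, 1, 2} and {3} are separate.
print(list(nx.strongly_connected_components(toy)))
###Output
_____no_output_____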
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head()
###Output
_____no_output_____
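###Markdown
If COO is unfamiliar, the layout is nothing more than parallel columns, one edge per row. A plain-Python sketch (illustration only, with made-up edges):
###Code
# Illustration only: COO stores one edge per row as parallel columns.
src = [0, 0, 1]          # edge i starts at src[i]
dst = [1, 2, 2]          # ...and ends at dst[i]
wgt = [1.0, 1.0, 2.0]    # optional per-edge weight, unused by the CC calls here
for s, d, w in zip(src, dst, wgt):
    print(f"edge {s} -> {d} (weight {w})")
###Output
_____no_output_____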
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head()
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
Total number of components found : 396
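###Markdown
An equivalent one-step way to get per-label sizes is `value_counts` (a sketch; this is standard pandas Series API and is also provided by cuDF Series, though availability can depend on the cuDF version):
###Code
# Alternative sketch: per-label sizes in a single call.
component_sizes = df['labels'].value_counts()
print("Total number of components found : ", len(component_sizes))
###Output
_____no_output_____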
###Markdown
Get size of the largest weakly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'][0])
###Output
Size of the largest component is found to be : 379
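###Markdown
One caveat in the cell above: `largest_component['vertices'][0]` is label-based indexing, which only works when the surviving row happens to carry index label 0. Positional access with `iloc` is the safer form (and is what later revisions of this notebook use):
###Code
# Positional access: take the first row regardless of its index label.
print("Size of the largest component is found to be : ",
      largest_component['vertices'].iloc[0])
###Output
_____no_output_____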
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 1"
component = df.query(expr)
print("Vertex Ids that belong to component label 1 : ")
print(component)
###Output
Vertex Ids that belong to component label 1 :
labels vertices
0 1 0
1 1 1
946 1 946
1084 1 1084
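###Markdown
`DataFrame.query` evaluates its string as a boolean expression over column names; the same filter can be written as an explicit boolean mask (equivalent sketch, standard pandas/cuDF behavior):
###Code
# Equivalent to df.query("labels == 1"): build the boolean mask directly.
component = df[df['labels'] == 1]
print(component)
###Output
_____no_output_____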
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head()
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
Total number of components found : 396
###Markdown
Get size of the largest strongly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'][0])
###Output
Size of the largest component is found to be : 379
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 2"
component = df.query(expr)
print("Vertex Ids that belong to component label 2 : ")
print(component)
###Output
Vertex Ids that belong to component label 2 :
labels vertices
2 2 2
3 2 3
4 2 4
5 2 5
6 2 6
###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-) graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure.Notebook Credits* Original Authors: Kumar Aatish* Created: 08/13/2019* Last Edit: 08/16/2020RAPIDS Versions: 0.15 Test Hardware* GV100 32G, CUDA 10.2 Introduction Weakly Connected ComponentsTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)** Generate the weakly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). Currently, the graph should be undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertex'][i] gives the vertex id of the i'th vertex Strongly Connected ComponentsTo compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)** Generate the strongly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). The graph can be either directed or undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertex'][i] gives the vertex id of the i'th vertex Some notes about vertex IDs...* The current version of cuGraph requires that vertex IDs be representable as 32-bit integers, meaning graphs currently can contain at most 2^32 unique vertex IDs. However, this limitation is being actively addressed and a version of cuGraph that accommodates more than 2^32 vertices will be available in the near future.* cuGraph will automatically renumber graphs to an internal format consisting of a contiguous series of integers starting from 0, and convert back to the original IDs when returning data to the caller. If the vertex IDs of the data are already a contiguous series of integers starting from 0, the auto-renumbering step can be skipped for faster graph creation times. * To skip auto-renumbering, set the `renumber` boolean arg to `False` when calling the appropriate graph creation API (eg. `G.from_cudf_edgelist(gdf_r, source='src', destination='dst', renumber=False)`). * For more advanced renumbering support, see the examples in `structure/renumber.ipynb` and `structure/renumber-2.ipynb` Test DataWe will be using the Netscience dataset : *M. E. J. 
Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.Netscience Adjacency Matrix |NetScience Strongly Connected Components:---------------------------------------------|------------------------------------------------------------: |  Matrix plots above by Yifan Hu, AT&T Labs Visualization Group.
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
###Output
_____no_output_____
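###Markdown
The auto-renumbering described above is conceptually just a dictionary from arbitrary IDs to a contiguous range starting at 0. A hand-rolled, plain-Python sketch of the idea (illustration only, with made-up IDs; cuGraph performs this internally, on the GPU):
###Code
# Illustration only: map arbitrary vertex IDs to contiguous ints from 0,
# keeping the reverse mapping so results can be translated back.
raw_src = [105, 9000, 105]
raw_dst = [9000, 42, 42]

id_map = {}                          # original ID -> contiguous ID
for v in raw_src + raw_dst:
    id_map.setdefault(v, len(id_map))
inverse = {new: old for old, new in id_map.items()}

src = [id_map[v] for v in raw_src]   # [0, 1, 0]
dst = [id_map[v] for v in raw_dst]   # [1, 2, 2]
print(src, dst, inverse)
###Output
_____no_output_____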
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head(5)
###Output
_____no_output_____
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest weakly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertex')
print("Size of the largest component is found to be : ", largest_component['vertex'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 1"
component = df.query(expr)
print("Vertex Ids that belong to component label 1 : ")
print(component)
###Output
_____no_output_____
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest strongly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertex')
print("Size of the largest component is found to be : ", largest_component['vertex'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 2"
component = df.query(expr)
print("Vertex Ids that belong to component label 2 : ")
print(component)
###Output
_____no_output_____
###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-) graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure.Notebook Credits* Original Authors: Kumar Aatish* Created: 08/13/2019* Last Edit: 06/10/2020RAPIDS Versions: 0.15 Test Hardware* GV100 32G, CUDA 10.2 Introduction Weakly Connected ComponentsTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)** Generate the weakly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). Currently, the graph should be undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex Strongly Connected ComponentsTo compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)** Generate the strongly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). The graph can be either directed or undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex cuGraph Notice The current version of cuGraph has some limitations:* Vertex IDs need to be 32-bit integers.* Vertex IDs are expected to be contiguous integers starting from 0.cuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon. Test DataWe will be using the Netscience dataset : *M. E. J. Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. 
The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.Netscience Adjacency Matrix |NetScience Strongly Connected Components:---------------------------------------------|------------------------------------------------------------: |  Matrix plots above by Yifan Hu, AT&T Labs Visualization Group.
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
###Output
_____no_output_____
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head(5)
###Output
_____no_output_____
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest weakly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 1"
component = df.query(expr)
print("Vertex Ids that belong to component label 1 : ")
print(component)
###Output
_____no_output_____
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest strongly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 2"
component = df.query(expr)
print("Vertex Ids that belong to component label 2 : ")
print(component)
###Output
_____no_output_____
###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-) graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure.Notebook Credits* Original Authors: Kumar Aatish* Created: 08/13/2019* Last Edit: 03/03/2020RAPIDS Versions: 0.13 Test Hardware* GV100 32G, CUDA 10.2 Introduction Weakly Connected ComponentsTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)** Generate the weakly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). Currently, the graph should be undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex Strongly Connected ComponentsTo compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)** Generate the strongly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). The graph can be either directed or undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex cuGraph Notice The current version of cuGraph has some limitations:* Vertex IDs need to be 32-bit integers.* Vertex IDs are expected to be contiguous integers starting from 0.cuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon. Test DataWe will be using the Netscience dataset : *M. E. J. Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. 
The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.Netscience Adjacency Matrix |NetScience Strongly Connected Components:---------------------------------------------|------------------------------------------------------------: |  Matrix plots above by Yifan Hu, AT&T Labs Visualization Group.
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
from collections import OrderedDict
###Output
_____no_output_____
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head()
###Output
_____no_output_____
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head()
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
Total number of components found : 268
###Markdown
Get size of the largest weakly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'][0])
###Output
Size of the largest component is found to be : 379
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 1"
component = df.query(expr)
print("Vertex Ids that belong to component label 1 : ")
print(component)
###Output
Vertex Ids that belong to component label 1 :
labels vertices
0 1 0
1 1 1
883 1 883
1009 1 1009
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head()
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
Total number of components found : 268
###Markdown
Get size of the largest strongly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'][0])
###Output
Size of the largest component is found to be : 379
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 2"
component = df.query(expr)
print("Vertex Ids that belong to component label 2 : ")
print(component)
###Output
Vertex Ids that belong to component label 2 :
labels vertices
2 2 2
3 2 3
4 2 4
5 2 5
6 2 6
###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-) graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure._Notebook Credits_| Author Credit | Date | Update | cuGraph Version | Test Hardware || --------------|------------|------------------|-----------------|--------------------|| Kumar Aatish | 08/13/2019 | created | 0.15 | GV100, CUDA 10.2 || Brad Rees | 10/18/2021 | updated | 21.12 nightly | GV100, CUDA 11.4 | Introduction Weakly Connected ComponentsTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)** Generate the weakly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). Currently, the graph should be undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertex'][i] gives the vertex id of the i'th vertex Strongly Connected ComponentsTo compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)** Generate the strongly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). The graph can be either directed or undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertex'][i] gives the vertex id of the i'th vertex Some notes about vertex IDs...* The current version of cuGraph requires that vertex IDs be representable as 32-bit integers, meaning graphs currently can contain at most 2^32 unique vertex IDs. However, this limitation is being actively addressed and a version of cuGraph that accommodates more than 2^32 vertices will be available in the near future.* cuGraph will automatically renumber graphs to an internal format consisting of a contiguous series of integers starting from 0, and convert back to the original IDs when returning data to the caller. If the vertex IDs of the data are already a contiguous series of integers starting from 0, the auto-renumbering step can be skipped for faster graph creation times. * To skip auto-renumbering, set the `renumber` boolean arg to `False` when calling the appropriate graph creation API (eg. `G.from_cudf_edgelist(gdf_r, source='src', destination='dst', renumber=False)`). * For more advanced renumbering support, see the examples in `structure/renumber.ipynb` and `structure/renumber-2.ipynb` Test DataWe will be using the Netscience dataset : *M. E. J. 
Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.Netscience Adjacency Matrix |NetScience Strongly Connected Components:---------------------------------------------|------------------------------------------------------------: |  Matrix plots above by Yifan Hu, AT&T Labs Visualization Group.
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
###Output
_____no_output_____
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head(5)
###Output
_____no_output_____
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Output the sizes of the top 10 largest weakly connected components
###Code
# Call nlargest on the groupby result to get the rows where the component counts are largest
# NOTE: this will change the value of "vertex" to be the count and "labels" to be an index
largest_component = label_count.nlargest(n = 10, columns = 'vertex')
print("Sizes of the top 10 largest components are: ")
print(largest_component)
###Output
_____no_output_____
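###Markdown
Because `count()` leaves the per-label size in a column still named `vertex` (the NOTE above), renaming the column first can make the ranking easier to read (a sketch; `rename` is standard pandas/cuDF API):
###Code
# Sketch: give the size column an honest name before ranking.
sizes = label_gby.count().rename(columns={'vertex': 'size'})
print(sizes.nlargest(n=10, columns='size'))
###Output
_____no_output_____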
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
# picking label 106 from above to reduce amount of data printed
expr = "labels == 106"
component = df.query(expr)
print("Vertex Ids that belong to component label 106 : ")
print(component)
###Output
_____no_output_____
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get the top 10 largest strongly connected components
###Code
# Call nlargest on the groupby result to get the rows where the component counts are largest
largest_component = label_count.nlargest(n = 10, columns = 'vertex')
print("Sizes of the top 10 largest components are: ")
print(largest_component)
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 66"
component = df.query(expr)
print("Vertex Ids that belong to component label 66 : ")
print(component)
###Output
_____no_output_____
###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-) graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure.Notebook Credits* Original Authors: Kumar Aatish* Created: 08/13/2019* Last Edit: 03/03/2020RAPIDS Versions: 0.13 Test Hardware* GV100 32G, CUDA 10.2 Introduction Weakly Connected ComponentsTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)** Generate the weakly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). Currently, the graph should be undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex Strongly Connected ComponentsTo compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)** Generate the strongly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). The graph can be either directed or undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex cuGraph Notice The current version of cuGraph has some limitations:* Vertex IDs need to be 32-bit integers.* Vertex IDs are expected to be contiguous integers starting from 0.cuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon. Test DataWe will be using the Netscience dataset : *M. E. J. Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. 
The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.Netscience Adjacency Matrix |NetScience Strongly Connected Components:---------------------------------------------|------------------------------------------------------------: |  Matrix plots above by Yifan Hu, AT&T Labs Visualization Group.
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
from collections import OrderedDict
###Output
_____no_output_____
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head()
###Output
_____no_output_____
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head()
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest weakly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 1"
component = df.query(expr)
print("Vertex Ids that belong to component label 1 : ")
print(component)
###Output
_____no_output_____
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head()
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest strongly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 2"
component = df.query(expr)
print("Vertex Ids that belong to component label 2 : ")
print(component)
###Output
_____no_output_____
###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-) graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure._Notebook Credits_| Author Credit | Date | Update | cuGraph Version | Test Hardware || --------------|------------|------------------|-----------------|--------------------|| Kumar Aatish | 08/13/2019 | created | 0.15 | GV100, CUDA 10.2 || Brad Rees | 10/18/2021 | updated | 21.12 nightly | GV100, CUDA 11.4 | Introduction Weakly Connected ComponentsTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)** Generate the weakly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). Currently, the graph should be undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertex'][i] gives the vertex id of the i'th vertex Strongly Connected ComponentsTo compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)** Generate the strongly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). The graph can be either directed or undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertex'][i] gives the vertex id of the i'th vertex Some notes about vertex IDs...* The current version of cuGraph requires that vertex IDs be representable as 32-bit integers, meaning graphs currently can contain at most 2^32 unique vertex IDs. However, this limitation is being actively addressed and a version of cuGraph that accommodates more than 2^32 vertices will be available in the near future.* cuGraph will automatically renumber graphs to an internal format consisting of a contiguous series of integers starting from 0, and convert back to the original IDs when returning data to the caller. If the vertex IDs of the data are already a contiguous series of integers starting from 0, the auto-renumbering step can be skipped for faster graph creation times. * To skip auto-renumbering, set the `renumber` boolean arg to `False` when calling the appropriate graph creation API (eg. `G.from_cudf_edgelist(gdf_r, source='src', destination='dst', renumber=False)`). * For more advanced renumbering support, see the examples in `structure/renumber.ipynb` and `structure/renumber-2.ipynb` Test DataWe will be using the Netscience dataset : *M. E. J. 
Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.Netscience Adjacency Matrix |NetScience Strongly Connected Components:---------------------------------------------|------------------------------------------------------------: |  Matrix plots above by Yifan Hu, AT&T Labs Visualization Group.
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
###Output
_____no_output_____
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head(5)
###Output
_____no_output_____
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Output the sizes of the top 10 largest weakly connected components
###Code
# Call nlargest on the groupby result to get the rows where the component counts are largest
# NOTE: this will change the value of "vertex" to be the count and "labels" to be an index
largest_component = label_count.nlargest(n = 10, columns = 'vertex')
print("Sizes of the top 10 largest components are: ")
print(largest_component)
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
# picking label 106 from above to reduce amount of data printed
expr = "labels == 106"
component = df.query(expr)
print("Vertex Ids that belong to component label 106 : ")
print(component)
###Output
_____no_output_____
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get the top 10 largest strongly connected components
###Code
# Call nlargest on the groupby result to get the rows where the component counts are largest
largest_component = label_count.nlargest(n = 10, columns = 'vertex')
print("Sizes of the top 10 largest components are: ")
print(largest_component)
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 66"
component = df.query(expr)
print("Vertex Ids that belong to component label 66 : ")
print(component)
###Output
_____no_output_____
###Markdown
Connected ComponentsIn this notebook, we will use cuGraph to compute weakly and strongly connected components of a graph and display some useful information about the resulting components._Weakly connected component_ (WCC) is often a necessary pre-processing step for many graph algorithms. A dataset may contain several disconnected (sub-) graphs. Quite often, running a graph algorithm only on one component of a disconnected graph can lead to bugs which are not easy to trace._Strongly connected components_ (SCC) is used in the early stages of graph analysis to get an idea of a graph's structure.Notebook Credits* Original Authors: Kumar Aatish* Created: 08/13/2019* Last Edit: 08/16/2020RAPIDS Versions: 0.15 Test Hardware* GV100 32G, CUDA 10.2 Introduction Weakly Connected ComponentsTo compute WCC for a graph in cuGraph we use:**cugraph.weakly_connected_components(G)** Generate the weakly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). Currently, the graph should be undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex Strongly Connected ComponentsTo compute SCC for a graph in cuGraph we use:**cugraph.strongly_connected_components(G)** Generate the strongly connected components and attach a component label to each vertex. Parameters ---------- G : cugraph.Graph cuGraph graph descriptor, should contain the connectivity information as an edge list (edge weights are not used for this algorithm). The graph can be either directed or undirected where an undirected edge is represented by a directed edge in both directions. The adjacency list will be computed if not already present. The number of vertices should fit into a 32b int. Returns ------- df : cudf.DataFrame df['labels'][i] gives the label id of the i'th vertex df['vertices'][i] gives the vertex id of the i'th vertex Some notes about vertex IDs...* The current version of cuGraph requires that vertex IDs be representable as 32-bit integers, meaning graphs currently can contain at most 2^32 unique vertex IDs. However, this limitation is being actively addressed and a version of cuGraph that accommodates more than 2^32 vertices will be available in the near future.* cuGraph will automatically renumber graphs to an internal format consisting of a contiguous series of integers starting from 0, and convert back to the original IDs when returning data to the caller. If the vertex IDs of the data are already a contiguous series of integers starting from 0, the auto-renumbering step can be skipped for faster graph creation times. * To skip auto-renumbering, set the `renumber` boolean arg to `False` when calling the appropriate graph creation API (eg. `G.from_cudf_edgelist(gdf_r, source='src', destination='dst', renumber=False)`). * For more advanced renumbering support, see the examples in `structure/renumber.ipynb` and `structure/renumber-2.ipynb` Test DataWe will be using the Netscience dataset : *M. E. J. 
Newman, Finding community structure in networks using the eigenvectors of matrices, Preprint physics/0605087 (2006)*The graph netscience contains a coauthorship network of scientists working on network theory and experiment. The version given here contains all components of the network, for a total of 1589 scientists, with the largest component of 379 scientists.Netscience Adjacency Matrix |NetScience Strongly Connected Components:---------------------------------------------|------------------------------------------------------------: |  Matrix plots above by Yifan Hu, AT&T Labs Visualization Group.
###Code
# Import needed libraries
import cugraph
import cudf
import numpy as np
###Output
_____no_output_____
###Markdown
1. Read graph data from filecuGraph depends on cuDF for data loading and the initial Dataframe creation on the GPU.The data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs are in what is known as Coordinate Format (COO).In this test case the data in the test file is expressed in three columns, source, destination and the edge weight. While edge weight is relevant in other algorithms, cuGraph connected component calls do not make use of it and hence that column can be discarded from the dataframe.
###Code
# Test file
datafile='../data/netscience.csv'
# the datafile contains three columns, but we only want to use the first two.
# We will use the 'usecols' feature of read_csv to ignore that column
gdf = cudf.read_csv(datafile, delimiter=' ', names=['src', 'dst', 'wgt'], dtype=['int32', 'int32', 'float32'], usecols=['src', 'dst'])
gdf.head(5)
###Output
_____no_output_____
###Markdown
2. Create a Graph from an edge list
###Code
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
###Output
_____no_output_____
###Markdown
3a. Call Weakly Connected Components
###Code
# Call cugraph.weakly_connected_components on the dataframe
df = cugraph.weakly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of weakly connected components
###Code
# Use groupby on the 'labels' column of the WCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest weakly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a weakly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 1"
component = df.query(expr)
print("Vertex Ids that belong to component label 1 : ")
print(component)
###Output
_____no_output_____
###Markdown
3b. Call Strongly Connected Components
###Code
# Call cugraph.strongly_connected_components on the dataframe
df = cugraph.strongly_connected_components(G)
df.head(5)
###Output
_____no_output_____
###Markdown
Get total number of strongly connected components
###Code
# Use groupby on the 'labels' column of the SCC output to get the counts of each connected component label
label_gby = df.groupby('labels')
label_count = label_gby.count()
print("Total number of components found : ", len(label_count))
###Output
_____no_output_____
###Markdown
Get size of the largest strongly connected component
###Code
# Call nlargest on the groupby result to get the row where the component count is the largest
largest_component = label_count.nlargest(n = 1, columns = 'vertices')
print("Size of the largest component is found to be : ", largest_component['vertices'].iloc[0])
###Output
_____no_output_____
###Markdown
Output vertex ids belonging to a strongly connected component label
###Code
# Query the connected component output to display vertex ids that belong to a component of interest
expr = "labels == 2"
component = df.query(expr)
print("Vertex Ids that belong to component label 2 : ")
print(component)
###Output
_____no_output_____ |
38-introduction_basic_network_concepts.ipynb | ###Markdown
Basic Network Concepts Basic Vocabulary
###Code
import networkx as nx

net1 = nx.Graph()
net1.add_node("Alice")
net1.add_node("Bob")
net1.add_node("Chuck")
net1.add_edge("Alice", "Bob")
net1.add_edge("Bob", "Chuck")
net1.nodes()
net1.edges()
nx.draw(net1, with_labels=True)
###Output
_____no_output_____
###Markdown
Edges
###Code
net2 = nx.DiGraph()
net2.add_node("Alice")
net2.add_node("Bob")
net2.add_node("Chuck")
net2.add_edge("Alice", "Bob")
net2.add_edge("Bob", "Chuck")
net2.edges()
nx.draw(net2, with_labels=True)
###Output
_____no_output_____
###Markdown
Edge Weights
###Code
net3 = nx.Graph()
net3.add_node("Alice")
net3.add_node("Bob")
net3.add_node("Chuck")
net3.add_edge("Alice", "Bob", weight=2)
net3.add_edge("Bob", "Chuck", weight=1)
net3.edges()
net3.edges(data=True)
###Output
_____no_output_____
###Markdown
Apollo 13 Movie Network
###Code
appolo = nx.Graph()
appolo.add_node("Tom Hanks")
appolo.add_node("Bill Paxton")
appolo.add_node("Gary Sinise")
appolo.add_node("Ed Harris")
appolo.add_node("Kevin Bacon")
appolo.add_edge("Tom Hanks", "Bill Paxton", movies = ["Magnificent Desolation: Walking on the Moon"])
appolo.add_edge("Tom Hanks", "Gary Sinise", movies = ["The Green Mile"])
appolo.add_edge("Bill Paxton", "Gary Sinise", movies = ["Magnificent Desolation: Walking on the Moon"])
appolo.add_edge("Gary Sinise", "Kevin Bacon", movies = ["Beyond All Boundaries"])
appolo.add_edge("Gary Sinise", "Ed Harris", movies = ["The Human Stain"])
nx.draw(appolo, with_labels = True)
###Output
_____no_output_____
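###Markdown
As a brief hedged aside (standard NetworkX dictionary-style access on the graph built above), the `movies` attribute stored on an edge can be read back directly:
###Code
# Look up the attribute stored on the Tom Hanks - Gary Sinise edge
appolo["Tom Hanks"]["Gary Sinise"]["movies"]
###Output
_____no_output_____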
###Markdown
Neighbors
###Code
list(appolo.neighbors('Tom Hanks'))
###Output
_____no_output_____
###Markdown
Adjacency Matrix
###Code
# nx.adj_matrix is a deprecated alias; newer NetworkX versions use nx.adjacency_matrix
nx.adj_matrix(appolo).todense()
appolo.nodes()
appolo.edges()
###Output
_____no_output_____
###Markdown
Cliques
###Code
nx.cliques_containing_node(appolo)
###Output
_____no_output_____
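###Markdown
A hedged companion sketch: `nx.find_cliques` enumerates the maximal cliques of the graph directly, which complements the per-node listing above.
###Code
# List every maximal clique in the Apollo 13 network
list(nx.find_cliques(appolo))
###Output
_____no_output_____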
###Markdown
Connectedness
###Code
nx.shortest_path(appolo, "Tom Hanks", "Kevin Bacon")
appolo.add_node("Hani Safadi")
nx.draw(appolo, with_labels = True)
try:
nx.shortest_path(appolo, "Tom Hanks", "Hani Safadi")
except nx.NetworkXNoPath:
print("No path exists")
###Output
No path exists
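###Markdown
As a hedged alternative to catching the exception, `nx.has_path` reports reachability without raising:
###Code
# Returns False instead of raising when no path exists
nx.has_path(appolo, "Tom Hanks", "Hani Safadi")
###Output
_____no_output_____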
|
07 Teaching Machines/donow/Moriarty_Class7_DoNow.ipynb | ###Markdown
Apply logistic regression to categorize whether a county had high mortality rate due to contamination 1. Import the necessary packages to read in the data, plot, and create a logistic regression model
###Code
import pandas as pd
%matplotlib inline
import numpy as np
from sklearn.linear_model import LogisticRegression
###Output
_____no_output_____
###Markdown
2. Read in the hanford.csv file in the `data/` folder
###Code
df=pd.read_csv('/home/sean/git/algorithms/class7/data/hanford.csv')
###Output
_____no_output_____
###Markdown
3. Calculate the basic descriptive statistics on the data
###Code
df.describe()
###Output
_____no_output_____
###Markdown
4. Find a reasonable threshold to say exposure is high and recode the data
###Code
df
# Recode using the medians from df.describe() above as thresholds (mortality 147.1, exposure 3.41)
df['Mort_High'] = df['Mortality'].apply(lambda x: 1 if x >= 147.1 else 0)
df['Expo_High'] = df['Exposure'].apply(lambda x: 1 if x >= 3.41 else 0)
df
###Output
_____no_output_____
###Markdown
5. Create a logistic regression model
###Code
lm = LogisticRegression()
x = np.asarray(df[['Expo_High']])
y = np.asarray(df['Mort_High'])
lm = lm.fit(x,y)
###Output
_____no_output_____
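###Markdown
As a hedged sanity check (not part of the original assignment), the fitted model's in-sample accuracy is available via `score`:
###Code
# Mean accuracy on the (very small) training set
lm.score(x, y)
###Output
_____no_output_____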
###Markdown
6. Predict whether the mortality rate (Cancer per 100,000 man years) will be high at an exposure level of 50
###Code
# An exposure of 50 is far above the 3.41 threshold, so Expo_High = 1
print(lm.predict([[1]]))        # predicted class (1 = high mortality)
print(lm.predict_proba([[1]]))  # class probabilities [P(low), P(high)]
###Output
_____no_output_____ |
src/jupyter_notebooks/jupyter-fai-a/keras_iter_InceptionResNetV2.ipynb | ###Markdown
Optimization algorithms: try the other optimizers Keras provides, such as gradient descent, and see how the choice of algorithm affects training speed and overfitting. Loss functions: try the other loss functions available in Keras and explore whether a different loss improves model performance. Learning-rate and epoch-count update strategies. Larger batch size: a larger batch size means the model operates on more data per step on both the training and test sets.
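A minimal hedged sketch of one such learning-rate update strategy, a step-decay schedule; the constants (base rate 1e-3, halving every 10 epochs) are illustrative assumptions, not values used in this notebook.
###Code
from keras.callbacks import LearningRateScheduler

def step_decay(epoch, lr=None):
    # Illustrative assumption: start at 1e-3 and halve the rate every 10 epochs
    base_lr, drop, epochs_drop = 1e-3, 0.5, 10
    return base_lr * (drop ** (epoch // epochs_drop))

lr_schedule = LearningRateScheduler(step_decay)
# Pass it at fit time, e.g. model.fit(X, y, callbacks=[lr_schedule])
###Output
_____no_output_____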
###Code
#from __future__ import absolute_import
#from __future__ import division
#from __future__ import print_function
import os
import cv2
import random
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.applications.inception_v3 import preprocess_input
from keras.applications import imagenet_utils
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
from keras.layers import *
#from keras.layers import Input
from keras.models import *
#from keras.models import Model
#from keras.layers import Input, Dense
#a = Input(shape=(32,))
#b = Dense(32)(a)
#model = Model(inputs=a, outputs=b)
from keras.callbacks import *
from keras.optimizers import *
from keras.regularizers import *
from keras import initializers
from keras.applications import *
#from keras.utils import multi_gpu_model
#from keras.applications import ResNet50
#from keras.applications import VGG16
#from keras.applications import VGG19
#from keras.applications import Xception # TensorFlow ONLY
#from keras.applications import InceptionResNetV2
#from keras.applications import InceptionV3
#tf.keras.applications.inception_v3.InceptionV3
#tf.keras.applications.inception_resnet_v2.InceptionResNetV2
####################################################################
# Configure which GPU is visible
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Settings
# See the detailed model parameters at: https://www.tensorflow.org/api_docs/python/tf/keras/applications
MODELS = {"InceptionResNetV2":InceptionResNetV2,"Xception":Xception}
#MODELS = {"NASNetLarge":NASNetLarge,"VGG16":VGG16}
#"InceptionV3":InceptionV3,"DenseNet121":DenseNet121,
# "DenseNet169":DenseNet169,"DenseNet201":DenseNet201,"Xception":Xception,
# "InceptionResNetV2":InceptionResNetV2,
#"ResNet50":ResNet50,
#"VGG16":VGG16,"VGG16":VGG19,"NASNetMobile":NASNetMobile
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
classes = ['collar_design_labels', 'neckline_design_labels', 'skirt_length_labels',
'sleeve_length_labels', 'neck_design_labels', 'lapel_design_labels',
'pant_length_labels','coat_length_labels']
#classes = ['collar_design_labels', 'neckline_design_labels', 'neck_design_labels']
#classes = ['pant_length_labels','coat_length_labels']
fai_result = []
for KEY, MODLE in MODELS.items():
#
    # For models that take 299x299 input, configure as follows
ppreprocess = preprocess_input
if KEY in ["InceptionV3","Xception", "InceptionResNetV2"]:
width = 299
elif KEY == "NASNetLarge":
width = 331
else:
width = 224
ppreprocess = imagenet_utils.preprocess_input
    print('###################### Training the 8 classifiers under {0} ####################'.format(KEY))
for cur_class in classes:
        print('####### {0}: {1} ####################'.format(KEY, cur_class))
df_train = pd.read_csv('../train/Annotations/{0}.csv'.format(cur_class), header=None)
df_train.columns = ['image_id', 'class', 'label']
df_load = df_train.copy()
df_load.reset_index(inplace=True)
del df_load['index']
print("选择的属性为:{0}, 种类的为:{1},样本数: {2}".format(cur_class , len(df_load['label'][0]),len(df_load)))
n = len(df_load)
n_class = len(df_load['label'][0])
prefix_cls = cur_class.split('_')[0]
X = np.zeros((n, width, width, 3), dtype=np.uint8)
y = np.zeros((n, n_class), dtype=np.uint8)
for i in range(n):
tmp_label = df_load['label'][i]
if len(tmp_label) > n_class:
print(df_load['image_id'][i])
X[i] = cv2.resize(cv2.imread('../train/{0}'.format(df_load['image_id'][i])), (width, width))
y[i][tmp_label.find('y')] = 1
print("数据装载到内存完毕:{0},{1}".format(KEY,cur_class))
#plt.figure(figsize=(12, 7))
#for i in range(8):
#random_index = random.randint(0, n-1)
#plt.subplot(2, 4, i+1)
#plt.imshow(X[random_index][:,:,::-1])
#plt.title(y[random_index])
#plt.savefig('../images/{0}/{0}_{1}.png'.format(prefix_cls, KEY),bbox_inches='tight')
        # Configure the fine-tuning details of the model
cnn_model = MODLE(include_top=False, input_shape=(width, width, 3), weights='imagenet',pooling='avg')
inputs = Input((width, width, 3))
x = inputs
x = Lambda(ppreprocess, name='preprocessing')(x)
x = cnn_model(x)
        # Newly added layers below
#x = GlobalAveragePooling2D()(x)
x = Dropout(0.5)(x)
#x = Flatten(name='flatten')(x)
x = Dense(1024, activation='relu', name='fc1')(x)
        # n_class is the number of classes for this attribute
x = Dense(512, activation='relu', kernel_initializer=initializers.he_uniform(seed=None),name='fc2')(x)
x = Dropout(0.5)(x)
x = Dense(n_class, activation='softmax', name='softmax')(x)
model = Model(inputs, x)
#X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.05, random_state=50)
#sgd = SGD(lr=learning_rate, decay=learning_rate/nb_epoch, momentum=0.9, nesterov=True)
#adam = optimizers.Adam(lr=1e-4)
#optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True))
adam = Adam(lr=0.001)
        # Multi-GPU training: Keras can auto-save the best model, but with multi-GPU training save() no longer works
#model = multi_gpu_model(model, 2)
model.compile(optimizer=adam,
loss='categorical_crossentropy',
metrics=['accuracy'])
# Callback that implements learning rate schedule
#schedule = Step([20], [1e-4, 1e-6])
#history = model.fit(X_train, Y_train,
# batch_size=batch_size, nb_epoch=nb_epoch, validation_data=(X_test,Y_test),
# callbacks=[
# schedule,
# keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0,save_best_only=True, mode='auto')
        # This callback saves the model to filepath after every epoch
        #keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0,save_best_only=True, mode='auto')
        # This callback stops training when the monitored value stops improving.
        # Once early stopping triggers (e.g. loss has not decreased since the previous epoch), training stops after `patience` more epochs
#keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
        # Path where the best model is saved after training
# checkpointer = ModelCheckpoint(filepath='../models/{0}/{0}_{1}.best.h5'.format(prefix_cls,KEY), verbose=1,
# save_best_only=True)
        # Start training and record the loss/accuracy history
h = model.fit(X, y, batch_size=16, epochs=42,
# callbacks=[EarlyStopping(patience=10), checkpointer],
shuffle=True)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(h.history['loss'])
plt.legend(['loss'])
plt.ylabel('loss')
plt.xlabel('epoch')
plt.title('{0}_{1}_loss'.format(prefix_cls, KEY))
plt.subplot(1, 2, 2)
plt.plot(h.history['acc'])
plt.legend(['acc'])
plt.ylabel('acc')
plt.xlabel('epoch')
plt.title('{0}_{1}_accuracy'.format(prefix_cls, KEY))
        # Save the plot of training loss and accuracy
plt.savefig('../models/{0}/{0}_{1}.png'.format(prefix_cls, KEY),bbox_inches='tight')
print("开始保存模型")
model.save_weights('../models/{0}/{0}_{1}vv2.best.h5'.format(prefix_cls, KEY))
        # Predict on the test set and write out the results
df_test = pd.read_csv('../test/Tests/question.csv', header=None)
df_test.columns = ['image_id', 'class', 'x']
del df_test['x']
df_load = df_test[(df_test['class'] == cur_class)].copy()
df_load.reset_index(inplace=True)
del df_load['index']
n = len(df_load)
X_test = np.zeros((n, width, width, 3), dtype=np.uint8)
for i in range(n):
X_test[i] = cv2.resize(cv2.imread('../test/{0}'.format(df_load['image_id'][i])), (width, width))
test_np = model.predict(X_test, batch_size=256)
result = []
for i, row in df_load.iterrows():
tmp_list = test_np[i]
tmp_result = ''
for tmp_ret in tmp_list:
tmp_result += '{:.4f};'.format(tmp_ret)
result.append(tmp_result[:-1])
df_load['result'] = result
df_load.to_csv('../result/{1}/{0}_{1}vv2.csv'.format(prefix_cls, KEY), header=None, index=False)
print(fai_result)
print(fai_result)
###Output
_____no_output_____ |